Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (96 commits)
apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
apic, x86: Check if EILVT APIC registers are available (AMD only)
x86: ioapic: Call free_irte only if interrupt remapping enabled
arm: Use ARCH_IRQ_INIT_FLAGS
genirq, ARM: Fix boot on ARM platforms
genirq: Fix CONFIG_GENIRQ_NO_DEPRECATED=y build
x86: Switch sparse_irq allocations to GFP_KERNEL
genirq: Switch sparse_irq allocator to GFP_KERNEL
genirq: Make sparse_lock a mutex
x86: lguest: Use new irq allocator
genirq: Remove the now unused sparse irq leftovers
genirq: Sanitize dynamic irq handling
genirq: Remove arch_init_chip_data()
x86: xen: Sanitise sparse_irq handling
x86: Use sane enumeration
x86: uv: Clean up the direct access to irq_desc
x86: Make io_apic.c local functions static
genirq: Remove irq_2_iommu
x86: Speed up the irq_remapped check in hot paths
intr_remap: Simplify the code further
...

Fix up trivial conflicts in arch/x86/Kconfig
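
The common thread in this series is the irq_chip interface change: the chip
callbacks move from taking a bare interrupt number (and looking up their state
via irq_to_desc()/get_irq_chip_data()) to taking a struct irq_data pointer that
already carries the irq number, chip_data and affinity. A minimal before/after
sketch of the conversion pattern; the foo_* names, register offset and includes
are illustrative, not from any driver in this series:

    /* sketch; assumes <linux/irq.h>, <linux/io.h>, <linux/bitops.h> */
    struct foo_pic {
            void __iomem    *regs;          /* hypothetical mmio window */
            unsigned int    irq_base;
    };

    /* old model: the callback gets the irq number and must look up
     * its per-chip state */
    static void foo_mask(unsigned int irq)
    {
            struct foo_pic *pic = get_irq_chip_data(irq);

            writel(BIT(irq - pic->irq_base), pic->regs + 0x10);
    }

    /* new model: struct irq_data hands over chip_data directly */
    static void foo_irq_mask(struct irq_data *data)
    {
            struct foo_pic *pic = data->chip_data;

            writel(BIT(data->irq - pic->irq_base), pic->regs + 0x10);
    }

    static struct irq_chip foo_chip = {
            .name           = "FOO",
            .irq_mask       = foo_irq_mask,
            /* .irq_unmask, .irq_ack, ... are converted the same way */
    };

The same rename shows up throughout the diff below: .mask/.unmask/.ack become
.irq_mask/.irq_unmask/.irq_ack, and the old .typename field becomes .name.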

+2177 -2268
+52 -32
Documentation/DocBook/genericirq.tmpl
··· 28 28 </authorgroup> 29 29 30 30 <copyright> 31 - <year>2005-2006</year> 31 + <year>2005-2010</year> 32 32 <holder>Thomas Gleixner</holder> 33 33 </copyright> 34 34 <copyright>
··· 100 100 <listitem><para>Edge type</para></listitem> 101 101 <listitem><para>Simple type</para></listitem> 102 102 </itemizedlist> 103 + During the implementation we identified another type: 104 + <itemizedlist> 105 + <listitem><para>Fast EOI type</para></listitem> 106 + </itemizedlist> 103 107 In the SMP world of the __do_IRQ() super-handler another type 104 108 was identified: 105 109 <itemizedlist>
··· 157 153 is still available. This leads to a kind of duality for the time 158 154 being. Over time the new model should be used in more and more 159 155 architectures, as it enables smaller and cleaner IRQ subsystems. 156 + It has been deprecated for three years now and is about to be removed. 160 157 </para> 161 158 </chapter> 162 159 <chapter id="bugs">
··· 222 217 <itemizedlist> 223 218 <listitem><para>handle_level_irq</para></listitem> 224 219 <listitem><para>handle_edge_irq</para></listitem> 220 + <listitem><para>handle_fasteoi_irq</para></listitem> 225 221 <listitem><para>handle_simple_irq</para></listitem> 226 222 <listitem><para>handle_percpu_irq</para></listitem> 227 223 </itemizedlist>
··· 239 233 are used by the default flow implementations. 240 234 The following helper functions are implemented (simplified excerpt): 241 235 <programlisting> 242 - default_enable(irq) 236 + default_enable(struct irq_data *data) 243 237 { 244 - desc->chip->unmask(irq); 238 + desc->chip->irq_unmask(data); 245 239 } 246 240 247 - default_disable(irq) 241 + default_disable(struct irq_data *data) 248 242 { 249 - if (!delay_disable(irq)) 250 - desc->chip->mask(irq); 243 + if (!delay_disable(data)) 244 + desc->chip->irq_mask(data); 251 245 } 252 246 253 - default_ack(irq) 247 + default_ack(struct irq_data *data) 254 248 { 255 - chip->ack(irq); 249 + chip->irq_ack(data); 256 250 } 257 251 258 - default_mask_ack(irq) 252 + default_mask_ack(struct irq_data *data) 259 253 { 260 - if (chip->mask_ack) { 261 - chip->mask_ack(irq); 254 + if (chip->irq_mask_ack) { 255 + chip->irq_mask_ack(data); 262 256 } else { 263 - chip->mask(irq); 264 - chip->ack(irq); 257 + chip->irq_mask(data); 258 + chip->irq_ack(data); 265 259 } 266 260 } 267 261 268 - noop(irq) 262 + noop(struct irq_data *data) 269 263 { 270 264 } 271 265
··· 284 278 <para> 285 279 The following control flow is implemented (simplified excerpt): 286 280 <programlisting> 287 - desc->chip->start(); 281 + desc->chip->irq_mask(); 288 282 handle_IRQ_event(desc->action); 289 - desc->chip->end(); 283 + desc->chip->irq_unmask(); 290 284 </programlisting> 291 285 </para> 292 - </sect3> 286 + </sect3> 287 + <sect3 id="Default_FASTEOI_IRQ_flow_handler"> 288 + <title>Default Fast EOI IRQ flow handler</title> 289 + <para> 290 + handle_fasteoi_irq provides a generic implementation 291 + for interrupts, which only need an EOI at the end of 292 + the handler. 293 + </para> 294 + <para> 295 + The following control flow is implemented (simplified excerpt): 296 + <programlisting> 297 + handle_IRQ_event(desc->action); 298 + desc->chip->irq_eoi(); 299 + </programlisting> 300 + </para> 301 + </sect3> 293 302 <sect3 id="Default_Edge_IRQ_flow_handler"> 294 303 <title>Default Edge IRQ flow handler</title> 295 304 <para>
··· 315 294 The following control flow is implemented (simplified excerpt): 316 295 <programlisting> 317 296 if (desc->status &amp; running) { 318 - desc->chip->hold(); 297 + desc->chip->irq_mask(); 319 298 desc->status |= pending | masked; 320 299 return; 321 300 } 322 - desc->chip->start(); 301 + desc->chip->irq_ack(); 323 302 desc->status |= running; 324 303 do { 325 304 if (desc->status &amp; masked) 326 - desc->chip->enable(); 305 + desc->chip->irq_unmask(); 327 306 desc->status &amp;= ~pending; 328 307 handle_IRQ_event(desc->action); 329 308 } while (status &amp; pending); 330 309 desc->status &amp;= ~running; 331 - desc->chip->end(); 332 310 </programlisting> 333 311 </para> 334 312 </sect3>
··· 362 342 <para> 363 343 The following control flow is implemented (simplified excerpt): 364 344 <programlisting> 365 - desc->chip->start(); 366 345 handle_IRQ_event(desc->action); 367 - desc->chip->end(); 346 + if (desc->chip->irq_eoi) 347 + desc->chip->irq_eoi(); 368 348 </programlisting> 369 349 </para> 370 350 </sect3>
··· 395 375 mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when 396 376 you want to use the delayed interrupt disable feature and your 397 377 hardware is not capable of retriggering an interrupt.) 398 - The delayed interrupt disable can be runtime enabled, per interrupt, 399 - by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field. 378 + The delayed interrupt disable is not configurable. 400 379 </para> 401 380 </sect2> 402 381 </sect1>
··· 406 387 contains all the direct chip relevant functions, which 407 388 can be utilized by the irq flow implementations. 408 389 <itemizedlist> 409 - <listitem><para>ack()</para></listitem> 410 - <listitem><para>mask_ack() - Optional, recommended for performance</para></listitem> 411 - <listitem><para>mask()</para></listitem> 412 - <listitem><para>unmask()</para></listitem> 413 - <listitem><para>retrigger() - Optional</para></listitem> 414 - <listitem><para>set_type() - Optional</para></listitem> 415 - <listitem><para>set_wake() - Optional</para></listitem> 390 + <listitem><para>irq_ack()</para></listitem> 391 + <listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem> 392 + <listitem><para>irq_mask()</para></listitem> 393 + <listitem><para>irq_unmask()</para></listitem> 394 + <listitem><para>irq_retrigger() - Optional</para></listitem> 395 + <listitem><para>irq_set_type() - Optional</para></listitem> 396 + <listitem><para>irq_set_wake() - Optional</para></listitem> 416 397 </itemizedlist> 417 398 These primitives are strictly intended to mean what they say: ack means 418 399 ACK, masking means masking of an IRQ line, etc. It is up to the flow
··· 477 458 <para> 478 459 This chapter contains the autogenerated documentation of the internal functions. 479 460 </para> 461 + !Ikernel/irq/irqdesc.c 480 462 !Ikernel/irq/handle.c 481 463 !Ikernel/irq/chip.c 482 464 </chapter>
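
The fast EOI flow documented above pairs with chips that can finish an
interrupt with a single EOI. A sketch of wiring such a hypothetical chip up
with the existing registration helper; bar_pic and the mask/unmask callbacks
would follow the foo_* pattern sketched after the commit log:

    static void bar_irq_eoi(struct irq_data *data)
    {
            struct bar_pic *pic = data->chip_data;

            writel(data->irq - pic->irq_base, pic->regs + 0x20); /* EOI */
    }

    static struct irq_chip bar_chip = {
            .name           = "BAR",
            .irq_mask       = bar_irq_mask,
            .irq_unmask     = bar_irq_unmask,
            .irq_eoi        = bar_irq_eoi,
    };

    /* at mapping/setup time: */
    set_irq_chip_and_handler_name(irq, &bar_chip, handle_fasteoi_irq,
                                  "fasteoi");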
+6
MAINTAINERS
··· 3241 3241 F: include/net/irda/ 3242 3242 F: net/irda/ 3243 3243 3244 + IRQ SUBSYSTEM 3245 + M: Thomas Gleixner <tglx@linutronix.de> 3246 + S: Maintained 3247 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core 3248 + F: kernel/irq/ 3249 + 3244 3250 ISAPNP 3245 3251 M: Jaroslav Kysela <perex@perex.cz> 3246 3252 S: Maintained
+2
arch/arm/include/asm/hw_irq.h
··· 24 24 #define IRQF_PROBE (1 << 1) 25 25 #define IRQF_NOAUTOEN (1 << 2) 26 26 27 + #define ARCH_IRQ_INIT_FLAGS (IRQ_NOREQUEST | IRQ_NOPROBE) 28 + 27 29 #endif
+1 -9
arch/arm/kernel/irq.c
··· 154 154 155 155 void __init init_IRQ(void) 156 156 { 157 - struct irq_desc *desc; 158 - int irq; 159 - 160 - for (irq = 0; irq < nr_irqs; irq++) { 161 - desc = irq_to_desc_alloc_node(irq, 0); 162 - desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 163 - } 164 - 165 157 init_arch_irq(); 166 158 } 167 159 ··· 161 169 int __init arch_probe_nr_irqs(void) 162 170 { 163 171 nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS; 164 - return 0; 172 + return nr_irqs; 165 173 } 166 174 #endif 167 175
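
The removed loop stamped IRQ_NOREQUEST | IRQ_NOPROBE on every descriptor by
hand; with ARCH_IRQ_INIT_FLAGS (added to hw_irq.h above) the genirq core does
it whenever it initializes a descriptor. Roughly, as a sketch of the core-side
fallback:

    /* core side: arches provide ARCH_IRQ_INIT_FLAGS in their hw_irq.h,
     * everyone else gets 0; the result is ORed into the initial status
     * of every descriptor the core sets up */
    #ifndef ARCH_IRQ_INIT_FLAGS
    # define ARCH_IRQ_INIT_FLAGS    0
    #endif
    #define IRQ_DEFAULT_INIT_FLAGS  (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)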
+3 -3
arch/arm/mach-bcmring/irq.c
··· 67 67 } 68 68 69 69 static struct irq_chip bcmring_irq0_chip = { 70 - .typename = "ARM-INTC0", 70 + .name = "ARM-INTC0", 71 71 .ack = bcmring_mask_irq0, 72 72 .mask = bcmring_mask_irq0, /* mask a specific interrupt, blocking its delivery. */ 73 73 .unmask = bcmring_unmask_irq0, /* unmasks an interrupt */ 74 74 }; 75 75 76 76 static struct irq_chip bcmring_irq1_chip = { 77 - .typename = "ARM-INTC1", 77 + .name = "ARM-INTC1", 78 78 .ack = bcmring_mask_irq1, 79 79 .mask = bcmring_mask_irq1, 80 80 .unmask = bcmring_unmask_irq1, 81 81 }; 82 82 83 83 static struct irq_chip bcmring_irq2_chip = { 84 - .typename = "ARM-SINTC", 84 + .name = "ARM-SINTC", 85 85 .ack = bcmring_mask_irq2, 86 86 .mask = bcmring_mask_irq2, 87 87 .unmask = bcmring_unmask_irq2,
+4 -4
arch/arm/mach-iop13xx/msi.c
··· 164 164 static struct irq_chip iop13xx_msi_chip = { 165 165 .name = "PCI-MSI", 166 166 .ack = iop13xx_msi_nop, 167 - .enable = unmask_msi_irq, 168 - .disable = mask_msi_irq, 169 - .mask = mask_msi_irq, 170 - .unmask = unmask_msi_irq, 167 + .irq_enable = unmask_msi_irq, 168 + .irq_disable = mask_msi_irq, 169 + .irq_mask = mask_msi_irq, 170 + .irq_unmask = unmask_msi_irq, 171 171 }; 172 172 173 173 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+4 -4
arch/ia64/kernel/msi_ia64.c
··· 104 104 */ 105 105 static struct irq_chip ia64_msi_chip = { 106 106 .name = "PCI-MSI", 107 - .mask = mask_msi_irq, 108 - .unmask = unmask_msi_irq, 107 + .irq_mask = mask_msi_irq, 108 + .irq_unmask = unmask_msi_irq, 109 109 .ack = ia64_ack_msi_irq, 110 110 #ifdef CONFIG_SMP 111 111 .set_affinity = ia64_set_msi_irq_affinity, ··· 160 160 161 161 static struct irq_chip dmar_msi_type = { 162 162 .name = "DMAR_MSI", 163 - .unmask = dmar_msi_unmask, 164 - .mask = dmar_msi_mask, 163 + .irq_unmask = dmar_msi_unmask, 164 + .irq_mask = dmar_msi_mask, 165 165 .ack = ia64_ack_msi_irq, 166 166 #ifdef CONFIG_SMP 167 167 .set_affinity = dmar_msi_set_affinity,
+2 -2
arch/ia64/sn/kernel/msi_sn.c
··· 228 228 229 229 static struct irq_chip sn_msi_chip = { 230 230 .name = "PCI-MSI", 231 - .mask = mask_msi_irq, 232 - .unmask = unmask_msi_irq, 231 + .irq_mask = mask_msi_irq, 232 + .irq_unmask = unmask_msi_irq, 233 233 .ack = sn_ack_msi_irq, 234 234 #ifdef CONFIG_SMP 235 235 .set_affinity = sn_set_msi_irq_affinity,
+1 -1
arch/m32r/kernel/irq.c
··· 51 51 for_each_online_cpu(j) 52 52 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 53 53 #endif 54 - seq_printf(p, " %14s", irq_desc[i].chip->typename); 54 + seq_printf(p, " %14s", irq_desc[i].chip->name); 55 55 seq_printf(p, " %s", action->name); 56 56 57 57 for (action=action->next; action; action = action->next)
+1 -1
arch/m32r/platforms/m32104ut/setup.c
··· 65 65 66 66 static struct irq_chip m32104ut_irq_type = 67 67 { 68 - .typename = "M32104UT-IRQ", 68 + .name = "M32104UT-IRQ", 69 69 .startup = startup_m32104ut_irq, 70 70 .shutdown = shutdown_m32104ut_irq, 71 71 .enable = enable_m32104ut_irq,
+4 -4
arch/m32r/platforms/m32700ut/setup.c
··· 71 71 72 72 static struct irq_chip m32700ut_irq_type = 73 73 { 74 - .typename = "M32700UT-IRQ", 74 + .name = "M32700UT-IRQ", 75 75 .startup = startup_m32700ut_irq, 76 76 .shutdown = shutdown_m32700ut_irq, 77 77 .enable = enable_m32700ut_irq, ··· 148 148 149 149 static struct irq_chip m32700ut_pld_irq_type = 150 150 { 151 - .typename = "M32700UT-PLD-IRQ", 151 + .name = "M32700UT-PLD-IRQ", 152 152 .startup = startup_m32700ut_pld_irq, 153 153 .shutdown = shutdown_m32700ut_pld_irq, 154 154 .enable = enable_m32700ut_pld_irq, ··· 217 217 218 218 static struct irq_chip m32700ut_lanpld_irq_type = 219 219 { 220 - .typename = "M32700UT-PLD-LAN-IRQ", 220 + .name = "M32700UT-PLD-LAN-IRQ", 221 221 .startup = startup_m32700ut_lanpld_irq, 222 222 .shutdown = shutdown_m32700ut_lanpld_irq, 223 223 .enable = enable_m32700ut_lanpld_irq, ··· 286 286 287 287 static struct irq_chip m32700ut_lcdpld_irq_type = 288 288 { 289 - .typename = "M32700UT-PLD-LCD-IRQ", 289 + .name = "M32700UT-PLD-LCD-IRQ", 290 290 .startup = startup_m32700ut_lcdpld_irq, 291 291 .shutdown = shutdown_m32700ut_lcdpld_irq, 292 292 .enable = enable_m32700ut_lcdpld_irq,
+1 -1
arch/m32r/platforms/mappi/setup.c
··· 65 65 66 66 static struct irq_chip mappi_irq_type = 67 67 { 68 - .typename = "MAPPI-IRQ", 68 + .name = "MAPPI-IRQ", 69 69 .startup = startup_mappi_irq, 70 70 .shutdown = shutdown_mappi_irq, 71 71 .enable = enable_mappi_irq,
+1 -1
arch/m32r/platforms/mappi2/setup.c
··· 72 72 73 73 static struct irq_chip mappi2_irq_type = 74 74 { 75 - .typename = "MAPPI2-IRQ", 75 + .name = "MAPPI2-IRQ", 76 76 .startup = startup_mappi2_irq, 77 77 .shutdown = shutdown_mappi2_irq, 78 78 .enable = enable_mappi2_irq,
+1 -1
arch/m32r/platforms/mappi3/setup.c
··· 72 72 73 73 static struct irq_chip mappi3_irq_type = 74 74 { 75 - .typename = "MAPPI3-IRQ", 75 + .name = "MAPPI3-IRQ", 76 76 .startup = startup_mappi3_irq, 77 77 .shutdown = shutdown_mappi3_irq, 78 78 .enable = enable_mappi3_irq,
+1 -1
arch/m32r/platforms/oaks32r/setup.c
··· 63 63 64 64 static struct irq_chip oaks32r_irq_type = 65 65 { 66 - .typename = "OAKS32R-IRQ", 66 + .name = "OAKS32R-IRQ", 67 67 .startup = startup_oaks32r_irq, 68 68 .shutdown = shutdown_oaks32r_irq, 69 69 .enable = enable_oaks32r_irq,
+3 -3
arch/m32r/platforms/opsput/setup.c
··· 72 72 73 73 static struct irq_chip opsput_irq_type = 74 74 { 75 - .typename = "OPSPUT-IRQ", 75 + .name = "OPSPUT-IRQ", 76 76 .startup = startup_opsput_irq, 77 77 .shutdown = shutdown_opsput_irq, 78 78 .enable = enable_opsput_irq, ··· 149 149 150 150 static struct irq_chip opsput_pld_irq_type = 151 151 { 152 - .typename = "OPSPUT-PLD-IRQ", 152 + .name = "OPSPUT-PLD-IRQ", 153 153 .startup = startup_opsput_pld_irq, 154 154 .shutdown = shutdown_opsput_pld_irq, 155 155 .enable = enable_opsput_pld_irq, ··· 218 218 219 219 static struct irq_chip opsput_lanpld_irq_type = 220 220 { 221 - .typename = "OPSPUT-PLD-LAN-IRQ", 221 + .name = "OPSPUT-PLD-LAN-IRQ", 222 222 .startup = startup_opsput_lanpld_irq, 223 223 .shutdown = shutdown_opsput_lanpld_irq, 224 224 .enable = enable_opsput_lanpld_irq,
+2 -2
arch/m32r/platforms/usrv/setup.c
··· 63 63 64 64 static struct irq_chip mappi_irq_type = 65 65 { 66 - .typename = "M32700-IRQ", 66 + .name = "M32700-IRQ", 67 67 .startup = startup_mappi_irq, 68 68 .shutdown = shutdown_mappi_irq, 69 69 .enable = enable_mappi_irq, ··· 136 136 137 137 static struct irq_chip m32700ut_pld_irq_type = 138 138 { 139 - .typename = "USRV-PLD-IRQ", 139 + .name = "USRV-PLD-IRQ", 140 140 .startup = startup_m32700ut_pld_irq, 141 141 .shutdown = shutdown_m32700ut_pld_irq, 142 142 .enable = enable_m32700ut_pld_irq,
+3 -3
arch/powerpc/platforms/cell/axon_msi.c
··· 310 310 } 311 311 312 312 static struct irq_chip msic_irq_chip = { 313 - .mask = mask_msi_irq, 314 - .unmask = unmask_msi_irq, 315 - .shutdown = unmask_msi_irq, 313 + .irq_mask = mask_msi_irq, 314 + .irq_unmask = unmask_msi_irq, 315 + .irq_shutdown = mask_msi_irq, 316 316 .name = "AXON-MSI", 317 317 }; 318 318
+1 -1
arch/powerpc/platforms/pseries/xics.c
··· 243 243 * at that level, so we do it here by hand. 244 244 */ 245 245 if (irq_to_desc(virq)->msi_desc) 246 - unmask_msi_irq(virq); 246 + unmask_msi_irq(irq_get_irq_data(virq)); 247 247 248 248 /* unmask it */ 249 249 xics_unmask_irq(virq);
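
unmask_msi_irq() now takes a struct irq_data, so a caller that only holds an
interrupt number bridges the two worlds with irq_get_irq_data(), as the hunk
above does. The same pattern in isolation (a sketch; checking msi_desc through
irq_data assumes the descriptor fields moved there as part of this series):

    struct irq_data *d = irq_get_irq_data(virq);

    if (d && d->msi_desc)           /* MSI backed interrupt? */
            unmask_msi_irq(d);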
+2 -2
arch/powerpc/sysdev/fsl_msi.c
··· 51 51 } 52 52 53 53 static struct irq_chip fsl_msi_chip = { 54 - .mask = mask_msi_irq, 55 - .unmask = unmask_msi_irq, 54 + .irq_mask = mask_msi_irq, 55 + .irq_unmask = unmask_msi_irq, 56 56 .ack = fsl_msi_end_irq, 57 57 .name = "FSL-MSI", 58 58 };
+11 -11
arch/powerpc/sysdev/mpic_pasemi_msi.c
··· 39 39 static struct mpic *msi_mpic; 40 40 41 41 42 - static void mpic_pasemi_msi_mask_irq(unsigned int irq) 42 + static void mpic_pasemi_msi_mask_irq(struct irq_data *data) 43 43 { 44 - pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq); 45 - mask_msi_irq(irq); 46 - mpic_mask_irq(irq); 44 + pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); 45 + mask_msi_irq(data); 46 + mpic_mask_irq(data->irq); 47 47 } 48 48 49 - static void mpic_pasemi_msi_unmask_irq(unsigned int irq) 49 + static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) 50 50 { 51 - pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq); 52 - mpic_unmask_irq(irq); 53 - unmask_msi_irq(irq); 51 + pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); 52 + mpic_unmask_irq(data->irq); 53 + unmask_msi_irq(data); 54 54 } 55 55 56 56 static struct irq_chip mpic_pasemi_msi_chip = { 57 - .shutdown = mpic_pasemi_msi_mask_irq, 58 - .mask = mpic_pasemi_msi_mask_irq, 59 - .unmask = mpic_pasemi_msi_unmask_irq, 57 + .irq_shutdown = mpic_pasemi_msi_mask_irq, 58 + .irq_mask = mpic_pasemi_msi_mask_irq, 59 + .irq_unmask = mpic_pasemi_msi_unmask_irq, 60 60 .eoi = mpic_end_irq, 61 61 .set_type = mpic_set_irq_type, 62 62 .set_affinity = mpic_set_affinity,
+9 -9
arch/powerpc/sysdev/mpic_u3msi.c
··· 23 23 /* A bit ugly, can we get this from the pci_dev somehow? */ 24 24 static struct mpic *msi_mpic; 25 25 26 - static void mpic_u3msi_mask_irq(unsigned int irq) 26 + static void mpic_u3msi_mask_irq(struct irq_data *data) 27 27 { 28 - mask_msi_irq(irq); 29 - mpic_mask_irq(irq); 28 + mask_msi_irq(data); 29 + mpic_mask_irq(data->irq); 30 30 } 31 31 32 - static void mpic_u3msi_unmask_irq(unsigned int irq) 32 + static void mpic_u3msi_unmask_irq(struct irq_data *data) 33 33 { 34 - mpic_unmask_irq(irq); 35 - unmask_msi_irq(irq); 34 + mpic_unmask_irq(data->irq); 35 + unmask_msi_irq(data); 36 36 } 37 37 38 38 static struct irq_chip mpic_u3msi_chip = { 39 - .shutdown = mpic_u3msi_mask_irq, 40 - .mask = mpic_u3msi_mask_irq, 41 - .unmask = mpic_u3msi_unmask_irq, 39 + .irq_shutdown = mpic_u3msi_mask_irq, 40 + .irq_mask = mpic_u3msi_mask_irq, 41 + .irq_unmask = mpic_u3msi_unmask_irq, 42 42 .eoi = mpic_end_irq, 43 43 .set_type = mpic_set_irq_type, 44 44 .set_affinity = mpic_set_affinity,
+1 -1
arch/sh/kernel/irq.c
··· 290 290 int __init arch_probe_nr_irqs(void) 291 291 { 292 292 nr_irqs = sh_mv.mv_nr_irqs; 293 - return 0; 293 + return NR_IRQS_LEGACY; 294 294 } 295 295 #endif 296 296
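
Together with the ARM hunk earlier, this reflects the new contract for
arch_probe_nr_irqs(): nr_irqs bounds the interrupt number space, while the
return value tells the core how many legacy descriptors to preallocate at
boot; the rest come from the sparse allocator on demand. As a sketch, with a
hypothetical machine query helper:

    int __init arch_probe_nr_irqs(void)
    {
            /* upper bound of the irq number space */
            nr_irqs = machine_max_nr_irqs();        /* hypothetical */

            /* descriptors to preallocate; everything else is
             * irq_alloc_desc*()'d on demand */
            return NR_IRQS_LEGACY;
    }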
+4 -4
arch/sparc/kernel/pci_msi.c
··· 114 114 115 115 static struct irq_chip msi_irq = { 116 116 .name = "PCI-MSI", 117 - .mask = mask_msi_irq, 118 - .unmask = unmask_msi_irq, 119 - .enable = unmask_msi_irq, 120 - .disable = mask_msi_irq, 117 + .irq_mask = mask_msi_irq, 118 + .irq_unmask = unmask_msi_irq, 119 + .irq_enable = unmask_msi_irq, 120 + .irq_disable = mask_msi_irq, 121 121 /* XXX affinity XXX */ 122 122 }; 123 123
+2 -2
arch/tile/kernel/irq.c
··· 208 208 } 209 209 210 210 static struct irq_chip tile_irq_chip = { 211 - .typename = "tile_irq_chip", 211 + .name = "tile_irq_chip", 212 212 .ack = tile_irq_chip_ack, 213 213 .eoi = tile_irq_chip_eoi, 214 214 .mask = tile_irq_chip_mask, ··· 288 288 for_each_online_cpu(j) 289 289 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 290 290 #endif 291 - seq_printf(p, " %14s", irq_desc[i].chip->typename); 291 + seq_printf(p, " %14s", irq_desc[i].chip->name); 292 292 seq_printf(p, " %s", action->name); 293 293 294 294 for (action = action->next; action; action = action->next)
+3 -3
arch/um/kernel/irq.c
··· 46 46 for_each_online_cpu(j) 47 47 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 48 48 #endif 49 - seq_printf(p, " %14s", irq_desc[i].chip->typename); 49 + seq_printf(p, " %14s", irq_desc[i].chip->name); 50 50 seq_printf(p, " %s", action->name); 51 51 52 52 for (action=action->next; action; action = action->next) ··· 369 369 370 370 /* This is used for everything else than the timer. */ 371 371 static struct irq_chip normal_irq_type = { 372 - .typename = "SIGIO", 372 + .name = "SIGIO", 373 373 .release = free_irq_by_irq_and_dev, 374 374 .disable = dummy, 375 375 .enable = dummy, ··· 378 378 }; 379 379 380 380 static struct irq_chip SIGVTALRM_irq_type = { 381 - .typename = "SIGVTALRM", 381 + .name = "SIGVTALRM", 382 382 .release = free_irq_by_irq_and_dev, 383 383 .shutdown = dummy, /* never called */ 384 384 .disable = dummy,
+4 -31
arch/x86/Kconfig
··· 63 63 select HAVE_USER_RETURN_NOTIFIER 64 64 select HAVE_ARCH_JUMP_LABEL 65 65 select HAVE_TEXT_POKE_SMP 66 + select HAVE_GENERIC_HARDIRQS 67 + select HAVE_SPARSE_IRQ 68 + select GENERIC_IRQ_PROBE 69 + select GENERIC_PENDING_IRQ if SMP 66 70 67 71 config INSTRUCTION_DECODER 68 72 def_bool (KPROBES || PERF_EVENTS) ··· 208 204 def_bool y 209 205 depends on EXPERIMENTAL && DMAR && ACPI 210 206 211 - # Use the generic interrupt handling code in kernel/irq/: 212 - config GENERIC_HARDIRQS 213 - def_bool y 214 - 215 - config GENERIC_HARDIRQS_NO__DO_IRQ 216 - def_bool y 217 - 218 - config GENERIC_IRQ_PROBE 219 - def_bool y 220 - 221 - config GENERIC_PENDING_IRQ 222 - def_bool y 223 - depends on GENERIC_HARDIRQS && SMP 224 - 225 207 config USE_GENERIC_SMP_HELPERS 226 208 def_bool y 227 209 depends on SMP ··· 289 299 and accesses the local apic via MSRs not via mmio. 290 300 291 301 If you don't know what to do here, say N. 292 - 293 - config SPARSE_IRQ 294 - bool "Support sparse irq numbering" 295 - depends on PCI_MSI || HT_IRQ 296 - ---help--- 297 - This enables support for sparse irqs. This is useful for distro 298 - kernels that want to define a high CONFIG_NR_CPUS value but still 299 - want to have low kernel memory footprint on smaller machines. 300 - 301 - ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread 302 - out the irq_desc[] array in a more NUMA-friendly way. ) 303 - 304 - If you don't know what to do here, say N. 305 - 306 - config NUMA_IRQ_DESC 307 - def_bool y 308 - depends on SPARSE_IRQ && NUMA 309 302 310 303 config X86_MPPARSE 311 304 bool "Enable MPS table" if ACPI
+1 -3
arch/x86/include/asm/apic.h
··· 252 252 } 253 253 #endif 254 254 255 - extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask); 256 - extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask); 257 - 255 + extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); 258 256 259 257 #else /* !CONFIG_X86_LOCAL_APIC */ 260 258 static inline void lapic_shutdown(void) { }
+1
arch/x86/include/asm/apicdef.h
··· 131 131 #define APIC_EILVTn(n) (0x500 + 0x10 * n) 132 132 #define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */ 133 133 #define APIC_EILVT_NR_AMD_10H 4 134 + #define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H 134 135 #define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF) 135 136 #define APIC_EILVT_MSG_FIX 0x0 136 137 #define APIC_EILVT_MSG_SMI 0x2
+6 -4
arch/x86/include/asm/hpet.h
··· 74 74 extern unsigned int hpet_readl(unsigned int a); 75 75 extern void force_hpet_resume(void); 76 76 77 - extern void hpet_msi_unmask(unsigned int irq); 78 - extern void hpet_msi_mask(unsigned int irq); 79 - extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg); 80 - extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); 77 + struct irq_data; 78 + extern void hpet_msi_unmask(struct irq_data *data); 79 + extern void hpet_msi_mask(struct irq_data *data); 80 + struct hpet_dev; 81 + extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg); 82 + extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg); 81 83 82 84 #ifdef CONFIG_PCI_MSI 83 85 extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
+13 -4
arch/x86/include/asm/hw_irq.h
··· 78 78 irq_attr->polarity = polarity; 79 79 } 80 80 81 + struct irq_2_iommu { 82 + struct intel_iommu *iommu; 83 + u16 irte_index; 84 + u16 sub_handle; 85 + u8 irte_mask; 86 + }; 87 + 81 88 /* 82 89 * This is performance-critical, we want to do it O(1) 83 90 * ··· 96 89 cpumask_var_t old_domain; 97 90 u8 vector; 98 91 u8 move_in_progress : 1; 92 + #ifdef CONFIG_INTR_REMAP 93 + struct irq_2_iommu irq_2_iommu; 94 + #endif 99 95 }; 100 96 101 - extern struct irq_cfg *irq_cfg(unsigned int); 102 97 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); 103 98 extern void send_cleanup_vector(struct irq_cfg *); 104 99 105 - struct irq_desc; 106 - extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *, 107 - unsigned int *dest_id); 100 + struct irq_data; 101 + int __ioapic_set_affinity(struct irq_data *, const struct cpumask *, 102 + unsigned int *dest_id); 108 103 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); 109 104 extern void setup_ioapic_dest(void); 110 105
+2
arch/x86/include/asm/i8259.h
··· 55 55 struct legacy_pic { 56 56 int nr_legacy_irqs; 57 57 struct irq_chip *chip; 58 + void (*mask)(unsigned int irq); 59 + void (*unmask)(unsigned int irq); 58 60 void (*mask_all)(void); 59 61 void (*restore_mask)(void); 60 62 void (*init)(int auto_eoi);
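
With the 8259 irq_chip itself converted to irq_data based callbacks, callers
like the ioapic setup code can no longer invoke legacy_pic->chip->mask(irq)
with a plain number, hence the irq-number based hooks added here. A sketch of
how the default PIC might fill them in; the mask_8259A_irq/unmask_8259A_irq
helper names are an assumption:

    static struct legacy_pic default_legacy_pic = {
            .nr_legacy_irqs = NR_IRQS_LEGACY,
            .chip           = &i8259A_chip,
            .mask           = mask_8259A_irq,       /* assumed names */
            .unmask         = unmask_8259A_irq,
            /* .mask_all, .restore_mask, .init, ... as before */
    };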
-6
arch/x86/include/asm/io_apic.h
··· 170 170 171 171 extern void probe_nr_irqs_gsi(void); 172 172 173 - extern int setup_ioapic_entry(int apic, int irq, 174 - struct IO_APIC_route_entry *entry, 175 - unsigned int destination, int trigger, 176 - int polarity, int vector, int pin); 177 - extern void ioapic_write_entry(int apic, int pin, 178 - struct IO_APIC_route_entry e); 179 173 extern void setup_ioapic_ids_from_mpc(void); 180 174 181 175 struct mp_ioapic_gsi{
+8
arch/x86/include/asm/irq_remapping.h
··· 24 24 irte->dest_id = IRTE_DEST(dest); 25 25 irte->redir_hint = 1; 26 26 } 27 + static inline bool irq_remapped(struct irq_cfg *cfg) 28 + { 29 + return cfg->irq_2_iommu.iommu != NULL; 30 + } 27 31 #else 28 32 static void prepare_irte(struct irte *irte, int vector, unsigned int dest) 29 33 { 34 + } 35 + static inline bool irq_remapped(struct irq_cfg *cfg) 36 + { 37 + return false; 30 38 } 31 39 #endif 32 40
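
Embedding irq_2_iommu in irq_cfg is what the "Speed up the irq_remapped check
in hot paths" commit is about: the check degenerates from a per-irq lookup
into one load from memory the caller already holds. Schematically (the old
irq_2_iommu() lookup helper mirrors the pre-series intr_remapping code):

    /* before: locate the per-irq remapping state first */
    static bool old_irq_remapped(unsigned int irq)
    {
            struct irq_2_iommu *i2i = irq_2_iommu(irq);     /* lookup */

            return i2i && i2i->iommu;
    }

    /* after: the cfg in hand answers directly */
    static bool new_irq_remapped(struct irq_cfg *cfg)
    {
            return cfg->irq_2_iommu.iommu != NULL;
    }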
+24 -30
arch/x86/kernel/apb_timer.c
··· 231 231 apbt_start_counter(phy_cs_timer_id); 232 232 } 233 233 234 - /* Setup IRQ routing via IOAPIC */ 235 - #ifdef CONFIG_SMP 236 - static void apbt_setup_irq(struct apbt_dev *adev) 237 - { 238 - struct irq_chip *chip; 239 - struct irq_desc *desc; 240 - 241 - /* timer0 irq has been setup early */ 242 - if (adev->irq == 0) 243 - return; 244 - desc = irq_to_desc(adev->irq); 245 - chip = get_irq_chip(adev->irq); 246 - disable_irq(adev->irq); 247 - desc->status |= IRQ_MOVE_PCNTXT; 248 - irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); 249 - /* APB timer irqs are set up as mp_irqs, timer is edge triggerred */ 250 - set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge"); 251 - enable_irq(adev->irq); 252 - if (system_state == SYSTEM_BOOTING) 253 - if (request_irq(adev->irq, apbt_interrupt_handler, 254 - IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, 255 - adev->name, adev)) { 256 - printk(KERN_ERR "Failed request IRQ for APBT%d\n", 257 - adev->num); 258 - } 259 - } 260 - #endif 261 - 262 234 static void apbt_enable_int(int n) 263 235 { 264 236 unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL); ··· 306 334 } 307 335 308 336 #ifdef CONFIG_SMP 337 + 338 + static void apbt_setup_irq(struct apbt_dev *adev) 339 + { 340 + /* timer0 irq has been setup early */ 341 + if (adev->irq == 0) 342 + return; 343 + 344 + if (system_state == SYSTEM_BOOTING) { 345 + irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); 346 + /* APB timer irqs are set up as mp_irqs, timer is edge type */ 347 + __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge"); 348 + if (request_irq(adev->irq, apbt_interrupt_handler, 349 + IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING, 350 + adev->name, adev)) { 351 + printk(KERN_ERR "Failed request IRQ for APBT%d\n", 352 + adev->num); 353 + } 354 + } else 355 + enable_irq(adev->irq); 356 + } 357 + 309 358 /* Should be called with per cpu */ 310 359 void apbt_setup_secondary_clock(void) 311 360 { ··· 382 389 383 390 switch (action & 0xf) { 384 391 case CPU_DEAD: 392 + disable_irq(adev->irq); 385 393 apbt_disable_int(cpu); 386 - if (system_state == SYSTEM_RUNNING) 394 + if (system_state == SYSTEM_RUNNING) { 387 395 pr_debug("skipping APBT CPU %lu offline\n", cpu); 388 - else if (adev) { 396 + } else if (adev) { 389 397 pr_debug("APBT clockevent for cpu %lu offline\n", cpu); 390 398 free_irq(adev->irq, adev); 391 399 }
+71 -21
arch/x86/kernel/apic/apic.c
··· 52 52 #include <asm/mce.h> 53 53 #include <asm/kvm_para.h> 54 54 #include <asm/tsc.h> 55 + #include <asm/atomic.h> 55 56 56 57 unsigned int num_processors; 57 58
··· 371 370 } 372 371 373 372 /* 374 - * Setup extended LVT, AMD specific (K8, family 10h) 373 + * Setup extended LVT, AMD specific 375 374 * 376 - * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and 377 - * MCE interrupts are supported. Thus MCE offset must be set to 0. 375 + * Software should use the LVT offsets the BIOS provides. The offsets 376 + * are determined by the subsystems using them, such as those for MCE 377 + * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts 378 + * are supported. Beginning with family 10h at least 4 offsets are 379 + * available. 378 380 * 381 + * Since the offsets must be consistent for all cores, we keep track 382 + * of the LVT offsets in software and reserve the offset for the same 383 + * vector also to be used on other cores. An offset is freed by 384 + * setting the entry to APIC_EILVT_MASKED. 385 + * 386 + * If the BIOS is right, there should be no conflicts. Otherwise a 387 + * "[Firmware Bug]: ..." error message is generated. However, if 388 + * software does not properly determine the offsets, it is not 389 + * necessarily a BIOS bug. 390 + */ 391 + 392 + static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX]; 393 + 394 + static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new) 395 + { 396 + return (old & APIC_EILVT_MASKED) 397 + || (new == APIC_EILVT_MASKED) 398 + || ((new & ~APIC_EILVT_MASKED) == old); 399 + } 400 + 401 + static unsigned int reserve_eilvt_offset(int offset, unsigned int new) 402 + { 403 + unsigned int rsvd; /* 0: uninitialized */ 404 + 405 + if (offset >= APIC_EILVT_NR_MAX) 406 + return ~0; 407 + 408 + rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED; 409 + do { 410 + if (rsvd && 411 + !eilvt_entry_is_changeable(rsvd, new)) 412 + /* may not change if vectors are different */ 413 + return rsvd; 414 + rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new); 415 + } while (rsvd != new); 416 + 417 + return new; 418 + } 419 + 420 + /* 379 421 * If mask=1, the LVT entry does not generate interrupts while mask=0 380 422 enables the vector. See also the BKDGs. 
381 423 */ 382 424 383 - #define APIC_EILVT_LVTOFF_MCE 0 384 - #define APIC_EILVT_LVTOFF_IBS 1 385 - 386 - static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask) 425 + int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) 387 426 { 388 - unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0); 389 - unsigned int v = (mask << 16) | (msg_type << 8) | vector; 427 + unsigned long reg = APIC_EILVTn(offset); 428 + unsigned int new, old, reserved; 390 429 391 - apic_write(reg, v); 392 - } 430 + new = (mask << 16) | (msg_type << 8) | vector; 431 + old = apic_read(reg); 432 + reserved = reserve_eilvt_offset(offset, new); 393 433 394 - u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask) 395 - { 396 - setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask); 397 - return APIC_EILVT_LVTOFF_MCE; 398 - } 434 + if (reserved != new) { 435 + pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but " 436 + "vector 0x%x was already reserved by another core, " 437 + "APIC%lX=0x%x\n", 438 + smp_processor_id(), new, reserved, reg, old); 439 + return -EINVAL; 440 + } 399 441 400 - u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask) 401 - { 402 - setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask); 403 - return APIC_EILVT_LVTOFF_IBS; 442 + if (!eilvt_entry_is_changeable(old, new)) { 443 + pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but " 444 + "register already in use, APIC%lX=0x%x\n", 445 + smp_processor_id(), new, reg, old); 446 + return -EBUSY; 447 + } 448 + 449 + apic_write(reg, new); 450 + 451 + return 0; 404 452 } 405 - EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs); 453 + EXPORT_SYMBOL_GPL(setup_APIC_eilvt); 406 454 407 455 /* 408 456 * Program the next event, relative to now
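
Callers now ask for a specific EILVT offset and learn whether the reservation
is consistent across cores. A hedged usage sketch; the offset and delivery
type are illustrative, APIC_EILVT_MSG_NMI comes from apicdef.h:

    /* try to claim EILVT offset 1 for an NMI-delivered vector; a
     * negative return means another core already holds the offset
     * with a conflicting setting, or the register is in use */
    static int example_claim_eilvt(u8 vector)
    {
            return setup_APIC_eilvt(1, vector, APIC_EILVT_MSG_NMI, 0);
    }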
+338 -539
arch/x86/kernel/apic/io_apic.c
··· 131 131 struct irq_pin_list *next; 132 132 }; 133 133 134 - static struct irq_pin_list *get_one_free_irq_2_pin(int node) 134 + static struct irq_pin_list *alloc_irq_pin_list(int node) 135 135 { 136 - struct irq_pin_list *pin; 137 - 138 - pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); 139 - 140 - return pin; 136 + return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); 141 137 } 142 138 143 139 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ ··· 146 150 int __init arch_early_irq_init(void) 147 151 { 148 152 struct irq_cfg *cfg; 149 - struct irq_desc *desc; 150 - int count; 151 - int node; 152 - int i; 153 + int count, node, i; 153 154 154 155 if (!legacy_pic->nr_legacy_irqs) { 155 156 nr_irqs_gsi = 0; ··· 157 164 count = ARRAY_SIZE(irq_cfgx); 158 165 node = cpu_to_node(0); 159 166 167 + /* Make sure the legacy interrupts are marked in the bitmap */ 168 + irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); 169 + 160 170 for (i = 0; i < count; i++) { 161 - desc = irq_to_desc(i); 162 - desc->chip_data = &cfg[i]; 163 - zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); 164 - zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); 171 + set_irq_chip_data(i, &cfg[i]); 172 + zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); 173 + zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); 165 174 /* 166 175 * For legacy IRQ's, start with assigning irq0 to irq15 to 167 176 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. ··· 178 183 } 179 184 180 185 #ifdef CONFIG_SPARSE_IRQ 181 - struct irq_cfg *irq_cfg(unsigned int irq) 186 + static struct irq_cfg *irq_cfg(unsigned int irq) 182 187 { 183 - struct irq_cfg *cfg = NULL; 184 - struct irq_desc *desc; 188 + return get_irq_chip_data(irq); 189 + } 185 190 186 - desc = irq_to_desc(irq); 187 - if (desc) 188 - cfg = desc->chip_data; 191 + static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) 192 + { 193 + struct irq_cfg *cfg; 189 194 195 + cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); 196 + if (!cfg) 197 + return NULL; 198 + if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) 199 + goto out_cfg; 200 + if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) 201 + goto out_domain; 190 202 return cfg; 203 + out_domain: 204 + free_cpumask_var(cfg->domain); 205 + out_cfg: 206 + kfree(cfg); 207 + return NULL; 191 208 } 192 209 193 - static struct irq_cfg *get_one_free_irq_cfg(int node) 210 + static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) 194 211 { 195 - struct irq_cfg *cfg; 196 - 197 - cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); 198 - if (cfg) { 199 - if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) { 200 - kfree(cfg); 201 - cfg = NULL; 202 - } else if (!zalloc_cpumask_var_node(&cfg->old_domain, 203 - GFP_ATOMIC, node)) { 204 - free_cpumask_var(cfg->domain); 205 - kfree(cfg); 206 - cfg = NULL; 207 - } 208 - } 209 - 210 - return cfg; 211 - } 212 - 213 - int arch_init_chip_data(struct irq_desc *desc, int node) 214 - { 215 - struct irq_cfg *cfg; 216 - 217 - cfg = desc->chip_data; 218 - if (!cfg) { 219 - desc->chip_data = get_one_free_irq_cfg(node); 220 - if (!desc->chip_data) { 221 - printk(KERN_ERR "can not alloc irq_cfg\n"); 222 - BUG_ON(1); 223 - } 224 - } 225 - 226 - return 0; 227 - } 228 - 229 - /* for move_irq_desc */ 230 - static void 231 - init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node) 232 - { 233 - struct irq_pin_list *old_entry, *head, *tail, *entry; 234 - 235 - cfg->irq_2_pin = NULL; 236 - old_entry = 
old_cfg->irq_2_pin; 237 - if (!old_entry) 238 - return; 239 - 240 - entry = get_one_free_irq_2_pin(node); 241 - if (!entry) 242 - return; 243 - 244 - entry->apic = old_entry->apic; 245 - entry->pin = old_entry->pin; 246 - head = entry; 247 - tail = entry; 248 - old_entry = old_entry->next; 249 - while (old_entry) { 250 - entry = get_one_free_irq_2_pin(node); 251 - if (!entry) { 252 - entry = head; 253 - while (entry) { 254 - head = entry->next; 255 - kfree(entry); 256 - entry = head; 257 - } 258 - /* still use the old one */ 259 - return; 260 - } 261 - entry->apic = old_entry->apic; 262 - entry->pin = old_entry->pin; 263 - tail->next = entry; 264 - tail = entry; 265 - old_entry = old_entry->next; 266 - } 267 - 268 - tail->next = NULL; 269 - cfg->irq_2_pin = head; 270 - } 271 - 272 - static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg) 273 - { 274 - struct irq_pin_list *entry, *next; 275 - 276 - if (old_cfg->irq_2_pin == cfg->irq_2_pin) 277 - return; 278 - 279 - entry = old_cfg->irq_2_pin; 280 - 281 - while (entry) { 282 - next = entry->next; 283 - kfree(entry); 284 - entry = next; 285 - } 286 - old_cfg->irq_2_pin = NULL; 287 - } 288 - 289 - void arch_init_copy_chip_data(struct irq_desc *old_desc, 290 - struct irq_desc *desc, int node) 291 - { 292 - struct irq_cfg *cfg; 293 - struct irq_cfg *old_cfg; 294 - 295 - cfg = get_one_free_irq_cfg(node); 296 - 297 212 if (!cfg) 298 213 return; 299 - 300 - desc->chip_data = cfg; 301 - 302 - old_cfg = old_desc->chip_data; 303 - 304 - cfg->vector = old_cfg->vector; 305 - cfg->move_in_progress = old_cfg->move_in_progress; 306 - cpumask_copy(cfg->domain, old_cfg->domain); 307 - cpumask_copy(cfg->old_domain, old_cfg->old_domain); 308 - 309 - init_copy_irq_2_pin(old_cfg, cfg, node); 310 - } 311 - 312 - static void free_irq_cfg(struct irq_cfg *cfg) 313 - { 214 + set_irq_chip_data(at, NULL); 314 215 free_cpumask_var(cfg->domain); 315 216 free_cpumask_var(cfg->old_domain); 316 217 kfree(cfg); 317 218 } 318 219 319 - void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) 320 - { 321 - struct irq_cfg *old_cfg, *cfg; 322 - 323 - old_cfg = old_desc->chip_data; 324 - cfg = desc->chip_data; 325 - 326 - if (old_cfg == cfg) 327 - return; 328 - 329 - if (old_cfg) { 330 - free_irq_2_pin(old_cfg, cfg); 331 - free_irq_cfg(old_cfg); 332 - old_desc->chip_data = NULL; 333 - } 334 - } 335 - /* end for move_irq_desc */ 336 - 337 220 #else 221 + 338 222 struct irq_cfg *irq_cfg(unsigned int irq) 339 223 { 340 224 return irq < nr_irqs ? 
irq_cfgx + irq : NULL; 341 225 } 342 226 227 + static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) 228 + { 229 + return irq_cfgx + irq; 230 + } 231 + 232 + static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { } 233 + 343 234 #endif 235 + 236 + static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) 237 + { 238 + int res = irq_alloc_desc_at(at, node); 239 + struct irq_cfg *cfg; 240 + 241 + if (res < 0) { 242 + if (res != -EEXIST) 243 + return NULL; 244 + cfg = get_irq_chip_data(at); 245 + if (cfg) 246 + return cfg; 247 + } 248 + 249 + cfg = alloc_irq_cfg(at, node); 250 + if (cfg) 251 + set_irq_chip_data(at, cfg); 252 + else 253 + irq_free_desc(at); 254 + return cfg; 255 + } 256 + 257 + static int alloc_irq_from(unsigned int from, int node) 258 + { 259 + return irq_alloc_desc_from(from, node); 260 + } 261 + 262 + static void free_irq_at(unsigned int at, struct irq_cfg *cfg) 263 + { 264 + free_irq_cfg(at, cfg); 265 + irq_free_desc(at); 266 + } 344 267 345 268 struct io_apic { 346 269 unsigned int index; ··· 364 451 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 365 452 } 366 453 367 - void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 454 + static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 368 455 { 369 456 unsigned long flags; 370 457 raw_spin_lock_irqsave(&ioapic_lock, flags); ··· 394 481 * fast in the common case, and fast for shared ISA-space IRQs. 395 482 */ 396 483 static int 397 - add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin) 484 + __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 398 485 { 399 486 struct irq_pin_list **last, *entry; 400 487 ··· 406 493 last = &entry->next; 407 494 } 408 495 409 - entry = get_one_free_irq_2_pin(node); 496 + entry = alloc_irq_pin_list(node); 410 497 if (!entry) { 411 498 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", 412 499 node, apic, pin); ··· 421 508 422 509 static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 423 510 { 424 - if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin)) 511 + if (__add_pin_to_irq_node(cfg, node, apic, pin)) 425 512 panic("IO-APIC: failed to add irq-pin. 
Can not proceed\n"); 426 513 } 427 514 ··· 484 571 IO_APIC_REDIR_LEVEL_TRIGGER, NULL); 485 572 } 486 573 487 - static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) 488 - { 489 - io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); 490 - } 491 - 492 574 static void io_apic_sync(struct irq_pin_list *entry) 493 575 { 494 576 /* ··· 495 587 readl(&io_apic->data); 496 588 } 497 589 498 - static void __mask_IO_APIC_irq(struct irq_cfg *cfg) 590 + static void mask_ioapic(struct irq_cfg *cfg) 499 591 { 592 + unsigned long flags; 593 + 594 + raw_spin_lock_irqsave(&ioapic_lock, flags); 500 595 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); 501 - } 502 - 503 - static void mask_IO_APIC_irq_desc(struct irq_desc *desc) 504 - { 505 - struct irq_cfg *cfg = desc->chip_data; 506 - unsigned long flags; 507 - 508 - BUG_ON(!cfg); 509 - 510 - raw_spin_lock_irqsave(&ioapic_lock, flags); 511 - __mask_IO_APIC_irq(cfg); 512 596 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 513 597 } 514 598 515 - static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) 599 + static void mask_ioapic_irq(struct irq_data *data) 516 600 { 517 - struct irq_cfg *cfg = desc->chip_data; 601 + mask_ioapic(data->chip_data); 602 + } 603 + 604 + static void __unmask_ioapic(struct irq_cfg *cfg) 605 + { 606 + io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); 607 + } 608 + 609 + static void unmask_ioapic(struct irq_cfg *cfg) 610 + { 518 611 unsigned long flags; 519 612 520 613 raw_spin_lock_irqsave(&ioapic_lock, flags); 521 - __unmask_IO_APIC_irq(cfg); 614 + __unmask_ioapic(cfg); 522 615 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 523 616 } 524 617 525 - static void mask_IO_APIC_irq(unsigned int irq) 618 + static void unmask_ioapic_irq(struct irq_data *data) 526 619 { 527 - struct irq_desc *desc = irq_to_desc(irq); 528 - 529 - mask_IO_APIC_irq_desc(desc); 530 - } 531 - static void unmask_IO_APIC_irq(unsigned int irq) 532 - { 533 - struct irq_desc *desc = irq_to_desc(irq); 534 - 535 - unmask_IO_APIC_irq_desc(desc); 620 + unmask_ioapic(data->chip_data); 536 621 } 537 622 538 623 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) ··· 595 694 struct IO_APIC_route_entry **ioapic_entries; 596 695 597 696 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, 598 - GFP_ATOMIC); 697 + GFP_KERNEL); 599 698 if (!ioapic_entries) 600 699 return 0; 601 700 602 701 for (apic = 0; apic < nr_ioapics; apic++) { 603 702 ioapic_entries[apic] = 604 703 kzalloc(sizeof(struct IO_APIC_route_entry) * 605 - nr_ioapic_registers[apic], GFP_ATOMIC); 704 + nr_ioapic_registers[apic], GFP_KERNEL); 606 705 if (!ioapic_entries[apic]) 607 706 goto nomem; 608 707 } ··· 1160 1259 /* Initialize vector_irq on a new cpu */ 1161 1260 int irq, vector; 1162 1261 struct irq_cfg *cfg; 1163 - struct irq_desc *desc; 1164 1262 1165 1263 /* 1166 1264 * vector_lock will make sure that we don't run into irq vector ··· 1168 1268 */ 1169 1269 raw_spin_lock(&vector_lock); 1170 1270 /* Mark the inuse vectors */ 1171 - for_each_irq_desc(irq, desc) { 1172 - cfg = desc->chip_data; 1173 - 1271 + for_each_active_irq(irq) { 1272 + cfg = get_irq_chip_data(irq); 1273 + if (!cfg) 1274 + continue; 1174 1275 /* 1175 1276 * If it is a legacy IRQ handled by the legacy PIC, this cpu 1176 1277 * will be part of the irq_cfg's domain. 
··· 1228 1327 } 1229 1328 #endif 1230 1329 1231 - static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger) 1330 + static void ioapic_register_intr(unsigned int irq, unsigned long trigger) 1232 1331 { 1233 1332 1234 1333 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1235 1334 trigger == IOAPIC_LEVEL) 1236 - desc->status |= IRQ_LEVEL; 1335 + irq_set_status_flags(irq, IRQ_LEVEL); 1237 1336 else 1238 - desc->status &= ~IRQ_LEVEL; 1337 + irq_clear_status_flags(irq, IRQ_LEVEL); 1239 1338 1240 - if (irq_remapped(irq)) { 1241 - desc->status |= IRQ_MOVE_PCNTXT; 1339 + if (irq_remapped(get_irq_chip_data(irq))) { 1340 + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 1242 1341 if (trigger) 1243 1342 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, 1244 1343 handle_fasteoi_irq, ··· 1259 1358 handle_edge_irq, "edge"); 1260 1359 } 1261 1360 1262 - int setup_ioapic_entry(int apic_id, int irq, 1263 - struct IO_APIC_route_entry *entry, 1264 - unsigned int destination, int trigger, 1265 - int polarity, int vector, int pin) 1361 + static int setup_ioapic_entry(int apic_id, int irq, 1362 + struct IO_APIC_route_entry *entry, 1363 + unsigned int destination, int trigger, 1364 + int polarity, int vector, int pin) 1266 1365 { 1267 1366 /* 1268 1367 * add it to the IO-APIC irq-routing table: ··· 1318 1417 return 0; 1319 1418 } 1320 1419 1321 - static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc, 1322 - int trigger, int polarity) 1420 + static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, 1421 + struct irq_cfg *cfg, int trigger, int polarity) 1323 1422 { 1324 - struct irq_cfg *cfg; 1325 1423 struct IO_APIC_route_entry entry; 1326 1424 unsigned int dest; 1327 1425 1328 1426 if (!IO_APIC_IRQ(irq)) 1329 1427 return; 1330 - 1331 - cfg = desc->chip_data; 1332 - 1333 1428 /* 1334 1429 * For legacy irqs, cfg->domain starts with cpu 0 for legacy 1335 1430 * controllers like 8259. 
Now that IO-APIC can handle this irq, update ··· 1354 1457 return; 1355 1458 } 1356 1459 1357 - ioapic_register_intr(irq, desc, trigger); 1460 + ioapic_register_intr(irq, trigger); 1358 1461 if (irq < legacy_pic->nr_legacy_irqs) 1359 - legacy_pic->chip->mask(irq); 1462 + legacy_pic->mask(irq); 1360 1463 1361 1464 ioapic_write_entry(apic_id, pin, entry); 1362 1465 } ··· 1367 1470 1368 1471 static void __init setup_IO_APIC_irqs(void) 1369 1472 { 1370 - int apic_id, pin, idx, irq; 1371 - int notcon = 0; 1372 - struct irq_desc *desc; 1373 - struct irq_cfg *cfg; 1473 + int apic_id, pin, idx, irq, notcon = 0; 1374 1474 int node = cpu_to_node(0); 1475 + struct irq_cfg *cfg; 1375 1476 1376 1477 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1377 1478 ··· 1406 1511 apic->multi_timer_check(apic_id, irq)) 1407 1512 continue; 1408 1513 1409 - desc = irq_to_desc_alloc_node(irq, node); 1410 - if (!desc) { 1411 - printk(KERN_INFO "can not get irq_desc for %d\n", irq); 1514 + cfg = alloc_irq_and_cfg_at(irq, node); 1515 + if (!cfg) 1412 1516 continue; 1413 - } 1414 - cfg = desc->chip_data; 1517 + 1415 1518 add_pin_to_irq_node(cfg, node, apic_id, pin); 1416 1519 /* 1417 1520 * don't mark it in pin_programmed, so later acpi could 1418 1521 * set it correctly when irq < 16 1419 1522 */ 1420 - setup_IO_APIC_irq(apic_id, pin, irq, desc, 1421 - irq_trigger(idx), irq_polarity(idx)); 1523 + setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), 1524 + irq_polarity(idx)); 1422 1525 } 1423 1526 1424 1527 if (notcon) ··· 1431 1538 */ 1432 1539 void setup_IO_APIC_irq_extra(u32 gsi) 1433 1540 { 1434 - int apic_id = 0, pin, idx, irq; 1435 - int node = cpu_to_node(0); 1436 - struct irq_desc *desc; 1541 + int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); 1437 1542 struct irq_cfg *cfg; 1438 1543 1439 1544 /* ··· 1447 1556 return; 1448 1557 1449 1558 irq = pin_2_irq(idx, apic_id, pin); 1450 - #ifdef CONFIG_SPARSE_IRQ 1451 - desc = irq_to_desc(irq); 1452 - if (desc) 1453 - return; 1454 - #endif 1455 - desc = irq_to_desc_alloc_node(irq, node); 1456 - if (!desc) { 1457 - printk(KERN_INFO "can not get irq_desc for %d\n", irq); 1458 - return; 1459 - } 1460 1559 1461 - cfg = desc->chip_data; 1560 + /* Only handle the non legacy irqs on secondary ioapics */ 1561 + if (apic_id == 0 || irq < NR_IRQS_LEGACY) 1562 + return; 1563 + 1564 + cfg = alloc_irq_and_cfg_at(irq, node); 1565 + if (!cfg) 1566 + return; 1567 + 1462 1568 add_pin_to_irq_node(cfg, node, apic_id, pin); 1463 1569 1464 1570 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { ··· 1465 1577 } 1466 1578 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); 1467 1579 1468 - setup_IO_APIC_irq(apic_id, pin, irq, desc, 1580 + setup_ioapic_irq(apic_id, pin, irq, cfg, 1469 1581 irq_trigger(idx), irq_polarity(idx)); 1470 1582 } 1471 1583 ··· 1516 1628 union IO_APIC_reg_03 reg_03; 1517 1629 unsigned long flags; 1518 1630 struct irq_cfg *cfg; 1519 - struct irq_desc *desc; 1520 1631 unsigned int irq; 1521 1632 1522 1633 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); ··· 1602 1715 } 1603 1716 } 1604 1717 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1605 - for_each_irq_desc(irq, desc) { 1718 + for_each_active_irq(irq) { 1606 1719 struct irq_pin_list *entry; 1607 1720 1608 - cfg = desc->chip_data; 1721 + cfg = get_irq_chip_data(irq); 1609 1722 if (!cfg) 1610 1723 continue; 1611 1724 entry = cfg->irq_2_pin; ··· 2112 2225 * an edge even if it isn't on the 8259A... 
2113 2226 */ 2114 2227 2115 - static unsigned int startup_ioapic_irq(unsigned int irq) 2228 + static unsigned int startup_ioapic_irq(struct irq_data *data) 2116 2229 { 2117 - int was_pending = 0; 2230 + int was_pending = 0, irq = data->irq; 2118 2231 unsigned long flags; 2119 - struct irq_cfg *cfg; 2120 2232 2121 2233 raw_spin_lock_irqsave(&ioapic_lock, flags); 2122 2234 if (irq < legacy_pic->nr_legacy_irqs) { 2123 - legacy_pic->chip->mask(irq); 2235 + legacy_pic->mask(irq); 2124 2236 if (legacy_pic->irq_pending(irq)) 2125 2237 was_pending = 1; 2126 2238 } 2127 - cfg = irq_cfg(irq); 2128 - __unmask_IO_APIC_irq(cfg); 2239 + __unmask_ioapic(data->chip_data); 2129 2240 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2130 2241 2131 2242 return was_pending; 2132 2243 } 2133 2244 2134 - static int ioapic_retrigger_irq(unsigned int irq) 2245 + static int ioapic_retrigger_irq(struct irq_data *data) 2135 2246 { 2136 - 2137 - struct irq_cfg *cfg = irq_cfg(irq); 2247 + struct irq_cfg *cfg = data->chip_data; 2138 2248 unsigned long flags; 2139 2249 2140 2250 raw_spin_lock_irqsave(&vector_lock, flags); ··· 2182 2298 * With interrupt-remapping, destination information comes 2183 2299 * from interrupt-remapping table entry. 2184 2300 */ 2185 - if (!irq_remapped(irq)) 2301 + if (!irq_remapped(cfg)) 2186 2302 io_apic_write(apic, 0x11 + pin*2, dest); 2187 2303 reg = io_apic_read(apic, 0x10 + pin*2); 2188 2304 reg &= ~IO_APIC_REDIR_VECTOR_MASK; ··· 2192 2308 } 2193 2309 2194 2310 /* 2195 - * Either sets desc->affinity to a valid value, and returns 2311 + * Either sets data->affinity to a valid value, and returns 2196 2312 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2197 - * leaves desc->affinity untouched. 2313 + * leaves data->affinity untouched. 2198 2314 */ 2199 - unsigned int 2200 - set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask, 2201 - unsigned int *dest_id) 2315 + int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2316 + unsigned int *dest_id) 2202 2317 { 2203 - struct irq_cfg *cfg; 2204 - unsigned int irq; 2318 + struct irq_cfg *cfg = data->chip_data; 2205 2319 2206 2320 if (!cpumask_intersects(mask, cpu_online_mask)) 2207 2321 return -1; 2208 2322 2209 - irq = desc->irq; 2210 - cfg = desc->chip_data; 2211 - if (assign_irq_vector(irq, cfg, mask)) 2323 + if (assign_irq_vector(data->irq, data->chip_data, mask)) 2212 2324 return -1; 2213 2325 2214 - cpumask_copy(desc->affinity, mask); 2326 + cpumask_copy(data->affinity, mask); 2215 2327 2216 - *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain); 2328 + *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); 2217 2329 return 0; 2218 2330 } 2219 2331 2220 2332 static int 2221 - set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) 2333 + ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2334 + bool force) 2222 2335 { 2223 - struct irq_cfg *cfg; 2336 + unsigned int dest, irq = data->irq; 2224 2337 unsigned long flags; 2225 - unsigned int dest; 2226 - unsigned int irq; 2227 - int ret = -1; 2228 - 2229 - irq = desc->irq; 2230 - cfg = desc->chip_data; 2338 + int ret; 2231 2339 2232 2340 raw_spin_lock_irqsave(&ioapic_lock, flags); 2233 - ret = set_desc_affinity(desc, mask, &dest); 2341 + ret = __ioapic_set_affinity(data, mask, &dest); 2234 2342 if (!ret) { 2235 2343 /* Only the high 8 bits are valid. 
*/ 2236 2344 dest = SET_APIC_LOGICAL_ID(dest); 2237 - __target_IO_APIC_irq(irq, dest, cfg); 2345 + __target_IO_APIC_irq(irq, dest, data->chip_data); 2238 2346 } 2239 2347 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2240 - 2241 2348 return ret; 2242 - } 2243 - 2244 - static int 2245 - set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) 2246 - { 2247 - struct irq_desc *desc; 2248 - 2249 - desc = irq_to_desc(irq); 2250 - 2251 - return set_ioapic_affinity_irq_desc(desc, mask); 2252 2349 } 2253 2350 2254 2351 #ifdef CONFIG_INTR_REMAP ··· 2246 2381 * the interrupt-remapping table entry. 2247 2382 */ 2248 2383 static int 2249 - migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) 2384 + ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2385 + bool force) 2250 2386 { 2251 - struct irq_cfg *cfg; 2387 + struct irq_cfg *cfg = data->chip_data; 2388 + unsigned int dest, irq = data->irq; 2252 2389 struct irte irte; 2253 - unsigned int dest; 2254 - unsigned int irq; 2255 - int ret = -1; 2256 2390 2257 2391 if (!cpumask_intersects(mask, cpu_online_mask)) 2258 - return ret; 2392 + return -EINVAL; 2259 2393 2260 - irq = desc->irq; 2261 2394 if (get_irte(irq, &irte)) 2262 - return ret; 2395 + return -EBUSY; 2263 2396 2264 - cfg = desc->chip_data; 2265 2397 if (assign_irq_vector(irq, cfg, mask)) 2266 - return ret; 2398 + return -EBUSY; 2267 2399 2268 2400 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2269 2401 ··· 2275 2413 if (cfg->move_in_progress) 2276 2414 send_cleanup_vector(cfg); 2277 2415 2278 - cpumask_copy(desc->affinity, mask); 2279 - 2416 + cpumask_copy(data->affinity, mask); 2280 2417 return 0; 2281 2418 } 2282 2419 2283 - /* 2284 - * Migrates the IRQ destination in the process context. 
2285 - */ 2286 - static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, 2287 - const struct cpumask *mask) 2288 - { 2289 - return migrate_ioapic_irq_desc(desc, mask); 2290 - } 2291 - static int set_ir_ioapic_affinity_irq(unsigned int irq, 2292 - const struct cpumask *mask) 2293 - { 2294 - struct irq_desc *desc = irq_to_desc(irq); 2295 - 2296 - return set_ir_ioapic_affinity_irq_desc(desc, mask); 2297 - } 2298 2420 #else 2299 - static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, 2300 - const struct cpumask *mask) 2421 + static inline int 2422 + ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2423 + bool force) 2301 2424 { 2302 2425 return 0; 2303 2426 } ··· 2344 2497 irq_exit(); 2345 2498 } 2346 2499 2347 - static void __irq_complete_move(struct irq_desc **descp, unsigned vector) 2500 + static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) 2348 2501 { 2349 - struct irq_desc *desc = *descp; 2350 - struct irq_cfg *cfg = desc->chip_data; 2351 2502 unsigned me; 2352 2503 2353 2504 if (likely(!cfg->move_in_progress)) ··· 2357 2512 send_cleanup_vector(cfg); 2358 2513 } 2359 2514 2360 - static void irq_complete_move(struct irq_desc **descp) 2515 + static void irq_complete_move(struct irq_cfg *cfg) 2361 2516 { 2362 - __irq_complete_move(descp, ~get_irq_regs()->orig_ax); 2517 + __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); 2363 2518 } 2364 2519 2365 2520 void irq_force_complete_move(int irq) 2366 2521 { 2367 - struct irq_desc *desc = irq_to_desc(irq); 2368 - struct irq_cfg *cfg = desc->chip_data; 2522 + struct irq_cfg *cfg = get_irq_chip_data(irq); 2369 2523 2370 2524 if (!cfg) 2371 2525 return; 2372 2526 2373 - __irq_complete_move(&desc, cfg->vector); 2527 + __irq_complete_move(cfg, cfg->vector); 2374 2528 } 2375 2529 #else 2376 - static inline void irq_complete_move(struct irq_desc **descp) {} 2530 + static inline void irq_complete_move(struct irq_cfg *cfg) { } 2377 2531 #endif 2378 2532 2379 - static void ack_apic_edge(unsigned int irq) 2533 + static void ack_apic_edge(struct irq_data *data) 2380 2534 { 2381 - struct irq_desc *desc = irq_to_desc(irq); 2382 - 2383 - irq_complete_move(&desc); 2384 - move_native_irq(irq); 2535 + irq_complete_move(data->chip_data); 2536 + move_native_irq(data->irq); 2385 2537 ack_APIC_irq(); 2386 2538 } 2387 2539 ··· 2400 2558 * Otherwise, we simulate the EOI message manually by changing the trigger 2401 2559 * mode to edge and then back to level, with RTE being masked during this. 2402 2560 */ 2403 - static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2561 + static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2404 2562 { 2405 2563 struct irq_pin_list *entry; 2564 + unsigned long flags; 2406 2565 2566 + raw_spin_lock_irqsave(&ioapic_lock, flags); 2407 2567 for_each_irq_pin(entry, cfg->irq_2_pin) { 2408 2568 if (mp_ioapics[entry->apic].apicver >= 0x20) { 2409 2569 /* ··· 2414 2570 * intr-remapping table entry. Hence for the io-apic 2415 2571 * EOI we use the pin number. 
2416 2572 */ 2417 - if (irq_remapped(irq)) 2573 + if (irq_remapped(cfg)) 2418 2574 io_apic_eoi(entry->apic, entry->pin); 2419 2575 else 2420 2576 io_apic_eoi(entry->apic, cfg->vector); ··· 2423 2579 __unmask_and_level_IO_APIC_irq(entry); 2424 2580 } 2425 2581 } 2426 - } 2427 - 2428 - static void eoi_ioapic_irq(struct irq_desc *desc) 2429 - { 2430 - struct irq_cfg *cfg; 2431 - unsigned long flags; 2432 - unsigned int irq; 2433 - 2434 - irq = desc->irq; 2435 - cfg = desc->chip_data; 2436 - 2437 - raw_spin_lock_irqsave(&ioapic_lock, flags); 2438 - __eoi_ioapic_irq(irq, cfg); 2439 2582 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2440 2583 } 2441 2584 2442 - static void ack_apic_level(unsigned int irq) 2585 + static void ack_apic_level(struct irq_data *data) 2443 2586 { 2587 + struct irq_cfg *cfg = data->chip_data; 2588 + int i, do_unmask_irq = 0, irq = data->irq; 2444 2589 struct irq_desc *desc = irq_to_desc(irq); 2445 2590 unsigned long v; 2446 - int i; 2447 - struct irq_cfg *cfg; 2448 - int do_unmask_irq = 0; 2449 2591 2450 - irq_complete_move(&desc); 2592 + irq_complete_move(cfg); 2451 2593 #ifdef CONFIG_GENERIC_PENDING_IRQ 2452 2594 /* If we are moving the irq we need to mask it */ 2453 2595 if (unlikely(desc->status & IRQ_MOVE_PENDING)) { 2454 2596 do_unmask_irq = 1; 2455 - mask_IO_APIC_irq_desc(desc); 2597 + mask_ioapic(cfg); 2456 2598 } 2457 2599 #endif 2458 2600 ··· 2474 2644 * we use the above logic (mask+edge followed by unmask+level) from 2475 2645 * Manfred Spraul to clear the remote IRR. 2476 2646 */ 2477 - cfg = desc->chip_data; 2478 2647 i = cfg->vector; 2479 2648 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2480 2649 ··· 2493 2664 if (!(v & (1 << (i & 0x1f)))) { 2494 2665 atomic_inc(&irq_mis_count); 2495 2666 2496 - eoi_ioapic_irq(desc); 2667 + eoi_ioapic_irq(irq, cfg); 2497 2668 } 2498 2669 2499 2670 /* Now we can move and renable the irq */ ··· 2524 2695 * accurate and is causing problems then it is a hardware bug 2525 2696 * and you can go talk to the chipset vendor about it. 
2526 2697 */ 2527 - cfg = desc->chip_data; 2528 2698 if (!io_apic_level_ack_pending(cfg)) 2529 2699 move_masked_irq(irq); 2530 - unmask_IO_APIC_irq_desc(desc); 2700 + unmask_ioapic(cfg); 2531 2701 } 2532 2702 } 2533 2703 2534 2704 #ifdef CONFIG_INTR_REMAP 2535 - static void ir_ack_apic_edge(unsigned int irq) 2705 + static void ir_ack_apic_edge(struct irq_data *data) 2536 2706 { 2537 2707 ack_APIC_irq(); 2538 2708 } 2539 2709 2540 - static void ir_ack_apic_level(unsigned int irq) 2710 + static void ir_ack_apic_level(struct irq_data *data) 2541 2711 { 2542 - struct irq_desc *desc = irq_to_desc(irq); 2543 - 2544 2712 ack_APIC_irq(); 2545 - eoi_ioapic_irq(desc); 2713 + eoi_ioapic_irq(data->irq, data->chip_data); 2546 2714 } 2547 2715 #endif /* CONFIG_INTR_REMAP */ 2548 2716 2549 2717 static struct irq_chip ioapic_chip __read_mostly = { 2550 - .name = "IO-APIC", 2551 - .startup = startup_ioapic_irq, 2552 - .mask = mask_IO_APIC_irq, 2553 - .unmask = unmask_IO_APIC_irq, 2554 - .ack = ack_apic_edge, 2555 - .eoi = ack_apic_level, 2718 + .name = "IO-APIC", 2719 + .irq_startup = startup_ioapic_irq, 2720 + .irq_mask = mask_ioapic_irq, 2721 + .irq_unmask = unmask_ioapic_irq, 2722 + .irq_ack = ack_apic_edge, 2723 + .irq_eoi = ack_apic_level, 2556 2724 #ifdef CONFIG_SMP 2557 - .set_affinity = set_ioapic_affinity_irq, 2725 + .irq_set_affinity = ioapic_set_affinity, 2558 2726 #endif 2559 - .retrigger = ioapic_retrigger_irq, 2727 + .irq_retrigger = ioapic_retrigger_irq, 2560 2728 }; 2561 2729 2562 2730 static struct irq_chip ir_ioapic_chip __read_mostly = { 2563 - .name = "IR-IO-APIC", 2564 - .startup = startup_ioapic_irq, 2565 - .mask = mask_IO_APIC_irq, 2566 - .unmask = unmask_IO_APIC_irq, 2731 + .name = "IR-IO-APIC", 2732 + .irq_startup = startup_ioapic_irq, 2733 + .irq_mask = mask_ioapic_irq, 2734 + .irq_unmask = unmask_ioapic_irq, 2567 2735 #ifdef CONFIG_INTR_REMAP 2568 - .ack = ir_ack_apic_edge, 2569 - .eoi = ir_ack_apic_level, 2736 + .irq_ack = ir_ack_apic_edge, 2737 + .irq_eoi = ir_ack_apic_level, 2570 2738 #ifdef CONFIG_SMP 2571 - .set_affinity = set_ir_ioapic_affinity_irq, 2739 + .irq_set_affinity = ir_ioapic_set_affinity, 2572 2740 #endif 2573 2741 #endif 2574 - .retrigger = ioapic_retrigger_irq, 2742 + .irq_retrigger = ioapic_retrigger_irq, 2575 2743 }; 2576 2744 2577 2745 static inline void init_IO_APIC_traps(void) 2578 2746 { 2579 - int irq; 2580 - struct irq_desc *desc; 2581 2747 struct irq_cfg *cfg; 2748 + unsigned int irq; 2582 2749 2583 2750 /* 2584 2751 * NOTE! The local APIC isn't very good at handling ··· 2587 2762 * Also, we've got to be careful not to trash gate 2588 2763 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2589 2764 */ 2590 - for_each_irq_desc(irq, desc) { 2591 - cfg = desc->chip_data; 2765 + for_each_active_irq(irq) { 2766 + cfg = get_irq_chip_data(irq); 2592 2767 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2593 2768 /* 2594 2769 * Hmm.. We don't have an entry for this, ··· 2599 2774 legacy_pic->make_irq(irq); 2600 2775 else 2601 2776 /* Strange. Oh, well.. 
*/ 2602 - desc->chip = &no_irq_chip; 2777 + set_irq_chip(irq, &no_irq_chip); 2603 2778 } 2604 2779 } 2605 2780 } ··· 2608 2783 * The local APIC irq-chip implementation: 2609 2784 */ 2610 2785 2611 - static void mask_lapic_irq(unsigned int irq) 2786 + static void mask_lapic_irq(struct irq_data *data) 2612 2787 { 2613 2788 unsigned long v; 2614 2789 ··· 2616 2791 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2617 2792 } 2618 2793 2619 - static void unmask_lapic_irq(unsigned int irq) 2794 + static void unmask_lapic_irq(struct irq_data *data) 2620 2795 { 2621 2796 unsigned long v; 2622 2797 ··· 2624 2799 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2625 2800 } 2626 2801 2627 - static void ack_lapic_irq(unsigned int irq) 2802 + static void ack_lapic_irq(struct irq_data *data) 2628 2803 { 2629 2804 ack_APIC_irq(); 2630 2805 } 2631 2806 2632 2807 static struct irq_chip lapic_chip __read_mostly = { 2633 2808 .name = "local-APIC", 2634 - .mask = mask_lapic_irq, 2635 - .unmask = unmask_lapic_irq, 2636 - .ack = ack_lapic_irq, 2809 + .irq_mask = mask_lapic_irq, 2810 + .irq_unmask = unmask_lapic_irq, 2811 + .irq_ack = ack_lapic_irq, 2637 2812 }; 2638 2813 2639 - static void lapic_register_intr(int irq, struct irq_desc *desc) 2814 + static void lapic_register_intr(int irq) 2640 2815 { 2641 - desc->status &= ~IRQ_LEVEL; 2816 + irq_clear_status_flags(irq, IRQ_LEVEL); 2642 2817 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2643 2818 "edge"); 2644 2819 } ··· 2741 2916 */ 2742 2917 static inline void __init check_timer(void) 2743 2918 { 2744 - struct irq_desc *desc = irq_to_desc(0); 2745 - struct irq_cfg *cfg = desc->chip_data; 2919 + struct irq_cfg *cfg = get_irq_chip_data(0); 2746 2920 int node = cpu_to_node(0); 2747 2921 int apic1, pin1, apic2, pin2; 2748 2922 unsigned long flags; ··· 2752 2928 /* 2753 2929 * get/set the timer IRQ vector: 2754 2930 */ 2755 - legacy_pic->chip->mask(0); 2931 + legacy_pic->mask(0); 2756 2932 assign_irq_vector(0, cfg, apic->target_cpus()); 2757 2933 2758 2934 /* ··· 2811 2987 add_pin_to_irq_node(cfg, node, apic1, pin1); 2812 2988 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2813 2989 } else { 2814 - /* for edge trigger, setup_IO_APIC_irq already 2990 + /* for edge trigger, setup_ioapic_irq already 2815 2991 * leave it unmasked. 2816 2992 * so only need to unmask if it is level-trigger 2817 2993 * do we really have level trigger timer? ··· 2819 2995 int idx; 2820 2996 idx = find_irq_entry(apic1, pin1, mp_INT); 2821 2997 if (idx != -1 && irq_trigger(idx)) 2822 - unmask_IO_APIC_irq_desc(desc); 2998 + unmask_ioapic(cfg); 2823 2999 } 2824 3000 if (timer_irq_works()) { 2825 3001 if (nmi_watchdog == NMI_IO_APIC) { 2826 3002 setup_nmi(); 2827 - legacy_pic->chip->unmask(0); 3003 + legacy_pic->unmask(0); 2828 3004 } 2829 3005 if (disable_timer_pin_1 > 0) 2830 3006 clear_IO_APIC_pin(0, pin1); ··· 2847 3023 */ 2848 3024 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2849 3025 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2850 - legacy_pic->chip->unmask(0); 3026 + legacy_pic->unmask(0); 2851 3027 if (timer_irq_works()) { 2852 3028 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2853 3029 timer_through_8259 = 1; 2854 3030 if (nmi_watchdog == NMI_IO_APIC) { 2855 - legacy_pic->chip->mask(0); 3031 + legacy_pic->mask(0); 2856 3032 setup_nmi(); 2857 - legacy_pic->chip->unmask(0); 3033 + legacy_pic->unmask(0); 2858 3034 } 2859 3035 goto out; 2860 3036 } ··· 2862 3038 * Cleanup, just in case ... 
2863 3039 */ 2864 3040 local_irq_disable(); 2865 - legacy_pic->chip->mask(0); 3041 + legacy_pic->mask(0); 2866 3042 clear_IO_APIC_pin(apic2, pin2); 2867 3043 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n"); 2868 3044 } ··· 2879 3055 apic_printk(APIC_QUIET, KERN_INFO 2880 3056 "...trying to set up timer as Virtual Wire IRQ...\n"); 2881 3057 2882 - lapic_register_intr(0, desc); 3058 + lapic_register_intr(0); 2883 3059 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2884 - legacy_pic->chip->unmask(0); 3060 + legacy_pic->unmask(0); 2885 3061 2886 3062 if (timer_irq_works()) { 2887 3063 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2888 3064 goto out; 2889 3065 } 2890 3066 local_irq_disable(); 2891 - legacy_pic->chip->mask(0); 3067 + legacy_pic->mask(0); 2892 3068 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2893 3069 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2894 3070 ··· 3054 3230 /* 3055 3231 * Dynamic irq allocate and deallocation 3056 3232 */ 3057 - unsigned int create_irq_nr(unsigned int irq_want, int node) 3233 + unsigned int create_irq_nr(unsigned int from, int node) 3058 3234 { 3059 - /* Allocate an unused irq */ 3060 - unsigned int irq; 3061 - unsigned int new; 3235 + struct irq_cfg *cfg; 3062 3236 unsigned long flags; 3063 - struct irq_cfg *cfg_new = NULL; 3064 - struct irq_desc *desc_new = NULL; 3237 + unsigned int ret = 0; 3238 + int irq; 3065 3239 3066 - irq = 0; 3067 - if (irq_want < nr_irqs_gsi) 3068 - irq_want = nr_irqs_gsi; 3240 + if (from < nr_irqs_gsi) 3241 + from = nr_irqs_gsi; 3242 + 3243 + irq = alloc_irq_from(from, node); 3244 + if (irq < 0) 3245 + return 0; 3246 + cfg = alloc_irq_cfg(irq, node); 3247 + if (!cfg) { 3248 + free_irq_at(irq, NULL); 3249 + return 0; 3250 + } 3069 3251 3070 3252 raw_spin_lock_irqsave(&vector_lock, flags); 3071 - for (new = irq_want; new < nr_irqs; new++) { 3072 - desc_new = irq_to_desc_alloc_node(new, node); 3073 - if (!desc_new) { 3074 - printk(KERN_INFO "can not get irq_desc for %d\n", new); 3075 - continue; 3076 - } 3077 - cfg_new = desc_new->chip_data; 3078 - 3079 - if (cfg_new->vector != 0) 3080 - continue; 3081 - 3082 - desc_new = move_irq_desc(desc_new, node); 3083 - cfg_new = desc_new->chip_data; 3084 - 3085 - if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) 3086 - irq = new; 3087 - break; 3088 - } 3253 + if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 3254 + ret = irq; 3089 3255 raw_spin_unlock_irqrestore(&vector_lock, flags); 3090 3256 3091 - if (irq > 0) 3092 - dynamic_irq_init_keep_chip_data(irq); 3093 - 3094 - return irq; 3257 + if (ret) { 3258 + set_irq_chip_data(irq, cfg); 3259 + irq_clear_status_flags(irq, IRQ_NOREQUEST); 3260 + } else { 3261 + free_irq_at(irq, cfg); 3262 + } 3263 + return ret; 3095 3264 } 3096 3265 3097 3266 int create_irq(void) ··· 3104 3287 3105 3288 void destroy_irq(unsigned int irq) 3106 3289 { 3290 + struct irq_cfg *cfg = get_irq_chip_data(irq); 3107 3291 unsigned long flags; 3108 3292 3109 - dynamic_irq_cleanup_keep_chip_data(irq); 3293 + irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3110 3294 3111 - free_irte(irq); 3295 + if (intr_remapping_enabled) 3296 + free_irte(irq); 3112 3297 raw_spin_lock_irqsave(&vector_lock, flags); 3113 - __clear_irq_vector(irq, get_irq_chip_data(irq)); 3298 + __clear_irq_vector(irq, cfg); 3114 3299 raw_spin_unlock_irqrestore(&vector_lock, flags); 3300 + free_irq_at(irq, cfg); 3115 3301 } 3116 3302 3117 3303 /* ··· 3138 3318 3139 3319 dest = 
apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3140 3320 3141 - if (irq_remapped(irq)) { 3321 + if (irq_remapped(get_irq_chip_data(irq))) { 3142 3322 struct irte irte; 3143 3323 int ir_index; 3144 3324 u16 sub_handle; ··· 3191 3371 } 3192 3372 3193 3373 #ifdef CONFIG_SMP 3194 - static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) 3374 + static int 3375 + msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3195 3376 { 3196 - struct irq_desc *desc = irq_to_desc(irq); 3197 - struct irq_cfg *cfg; 3377 + struct irq_cfg *cfg = data->chip_data; 3198 3378 struct msi_msg msg; 3199 3379 unsigned int dest; 3200 3380 3201 - if (set_desc_affinity(desc, mask, &dest)) 3381 + if (__ioapic_set_affinity(data, mask, &dest)) 3202 3382 return -1; 3203 3383 3204 - cfg = desc->chip_data; 3205 - 3206 - get_cached_msi_msg_desc(desc, &msg); 3384 + __get_cached_msi_msg(data->msi_desc, &msg); 3207 3385 3208 3386 msg.data &= ~MSI_DATA_VECTOR_MASK; 3209 3387 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3210 3388 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3211 3389 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3212 3390 3213 - write_msi_msg_desc(desc, &msg); 3391 + __write_msi_msg(data->msi_desc, &msg); 3214 3392 3215 3393 return 0; 3216 3394 } ··· 3218 3400 * done in the process context using interrupt-remapping hardware. 3219 3401 */ 3220 3402 static int 3221 - ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) 3403 + ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3404 + bool force) 3222 3405 { 3223 - struct irq_desc *desc = irq_to_desc(irq); 3224 - struct irq_cfg *cfg = desc->chip_data; 3225 - unsigned int dest; 3406 + struct irq_cfg *cfg = data->chip_data; 3407 + unsigned int dest, irq = data->irq; 3226 3408 struct irte irte; 3227 3409 3228 3410 if (get_irte(irq, &irte)) 3229 3411 return -1; 3230 3412 3231 - if (set_desc_affinity(desc, mask, &dest)) 3413 + if (__ioapic_set_affinity(data, mask, &dest)) 3232 3414 return -1; 3233 3415 3234 3416 irte.vector = cfg->vector; ··· 3258 3440 * which implement the MSI or MSI-X Capability Structure. 
3259 3441 */ 3260 3442 static struct irq_chip msi_chip = { 3261 - .name = "PCI-MSI", 3262 - .unmask = unmask_msi_irq, 3263 - .mask = mask_msi_irq, 3264 - .ack = ack_apic_edge, 3443 + .name = "PCI-MSI", 3444 + .irq_unmask = unmask_msi_irq, 3445 + .irq_mask = mask_msi_irq, 3446 + .irq_ack = ack_apic_edge, 3265 3447 #ifdef CONFIG_SMP 3266 - .set_affinity = set_msi_irq_affinity, 3448 + .irq_set_affinity = msi_set_affinity, 3267 3449 #endif 3268 - .retrigger = ioapic_retrigger_irq, 3450 + .irq_retrigger = ioapic_retrigger_irq, 3269 3451 }; 3270 3452 3271 3453 static struct irq_chip msi_ir_chip = { 3272 - .name = "IR-PCI-MSI", 3273 - .unmask = unmask_msi_irq, 3274 - .mask = mask_msi_irq, 3454 + .name = "IR-PCI-MSI", 3455 + .irq_unmask = unmask_msi_irq, 3456 + .irq_mask = mask_msi_irq, 3275 3457 #ifdef CONFIG_INTR_REMAP 3276 - .ack = ir_ack_apic_edge, 3458 + .irq_ack = ir_ack_apic_edge, 3277 3459 #ifdef CONFIG_SMP 3278 - .set_affinity = ir_set_msi_irq_affinity, 3460 + .irq_set_affinity = ir_msi_set_affinity, 3279 3461 #endif 3280 3462 #endif 3281 - .retrigger = ioapic_retrigger_irq, 3463 + .irq_retrigger = ioapic_retrigger_irq, 3282 3464 }; 3283 3465 3284 3466 /* ··· 3310 3492 3311 3493 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3312 3494 { 3313 - int ret; 3314 3495 struct msi_msg msg; 3496 + int ret; 3315 3497 3316 3498 ret = msi_compose_msg(dev, irq, &msg, -1); 3317 3499 if (ret < 0) ··· 3320 3502 set_irq_msi(irq, msidesc); 3321 3503 write_msi_msg(irq, &msg); 3322 3504 3323 - if (irq_remapped(irq)) { 3324 - struct irq_desc *desc = irq_to_desc(irq); 3325 - /* 3326 - * irq migration in process context 3327 - */ 3328 - desc->status |= IRQ_MOVE_PCNTXT; 3505 + if (irq_remapped(get_irq_chip_data(irq))) { 3506 + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3329 3507 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); 3330 3508 } else 3331 3509 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); ··· 3333 3519 3334 3520 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3335 3521 { 3336 - unsigned int irq; 3337 - int ret, sub_handle; 3522 + int node, ret, sub_handle, index = 0; 3523 + unsigned int irq, irq_want; 3338 3524 struct msi_desc *msidesc; 3339 - unsigned int irq_want; 3340 3525 struct intel_iommu *iommu = NULL; 3341 - int index = 0; 3342 - int node; 3343 3526 3344 3527 /* x86 doesn't support multiple MSI yet */ 3345 3528 if (type == PCI_CAP_ID_MSI && nvec > 1) ··· 3396 3585 3397 3586 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3398 3587 #ifdef CONFIG_SMP 3399 - static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 3588 + static int 3589 + dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3590 + bool force) 3400 3591 { 3401 - struct irq_desc *desc = irq_to_desc(irq); 3402 - struct irq_cfg *cfg; 3592 + struct irq_cfg *cfg = data->chip_data; 3593 + unsigned int dest, irq = data->irq; 3403 3594 struct msi_msg msg; 3404 - unsigned int dest; 3405 3595 3406 - if (set_desc_affinity(desc, mask, &dest)) 3596 + if (__ioapic_set_affinity(data, mask, &dest)) 3407 3597 return -1; 3408 - 3409 - cfg = desc->chip_data; 3410 3598 3411 3599 dmar_msi_read(irq, &msg); 3412 3600 ··· 3422 3612 #endif /* CONFIG_SMP */ 3423 3613 3424 3614 static struct irq_chip dmar_msi_type = { 3425 - .name = "DMAR_MSI", 3426 - .unmask = dmar_msi_unmask, 3427 - .mask = dmar_msi_mask, 3428 - .ack = ack_apic_edge, 3615 + .name = "DMAR_MSI", 3616 + .irq_unmask = dmar_msi_unmask, 3617 + 
.irq_mask = dmar_msi_mask, 3618 + .irq_ack = ack_apic_edge, 3429 3619 #ifdef CONFIG_SMP 3430 - .set_affinity = dmar_msi_set_affinity, 3620 + .irq_set_affinity = dmar_msi_set_affinity, 3431 3621 #endif 3432 - .retrigger = ioapic_retrigger_irq, 3622 + .irq_retrigger = ioapic_retrigger_irq, 3433 3623 }; 3434 3624 3435 3625 int arch_setup_dmar_msi(unsigned int irq) ··· 3450 3640 #ifdef CONFIG_HPET_TIMER 3451 3641 3452 3642 #ifdef CONFIG_SMP 3453 - static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) 3643 + static int hpet_msi_set_affinity(struct irq_data *data, 3644 + const struct cpumask *mask, bool force) 3454 3645 { 3455 - struct irq_desc *desc = irq_to_desc(irq); 3456 - struct irq_cfg *cfg; 3646 + struct irq_cfg *cfg = data->chip_data; 3457 3647 struct msi_msg msg; 3458 3648 unsigned int dest; 3459 3649 3460 - if (set_desc_affinity(desc, mask, &dest)) 3650 + if (__ioapic_set_affinity(data, mask, &dest)) 3461 3651 return -1; 3462 3652 3463 - cfg = desc->chip_data; 3464 - 3465 - hpet_msi_read(irq, &msg); 3653 + hpet_msi_read(data->handler_data, &msg); 3466 3654 3467 3655 msg.data &= ~MSI_DATA_VECTOR_MASK; 3468 3656 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3469 3657 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3470 3658 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3471 3659 3472 - hpet_msi_write(irq, &msg); 3660 + hpet_msi_write(data->handler_data, &msg); 3473 3661 3474 3662 return 0; 3475 3663 } ··· 3475 3667 #endif /* CONFIG_SMP */ 3476 3668 3477 3669 static struct irq_chip ir_hpet_msi_type = { 3478 - .name = "IR-HPET_MSI", 3479 - .unmask = hpet_msi_unmask, 3480 - .mask = hpet_msi_mask, 3670 + .name = "IR-HPET_MSI", 3671 + .irq_unmask = hpet_msi_unmask, 3672 + .irq_mask = hpet_msi_mask, 3481 3673 #ifdef CONFIG_INTR_REMAP 3482 - .ack = ir_ack_apic_edge, 3674 + .irq_ack = ir_ack_apic_edge, 3483 3675 #ifdef CONFIG_SMP 3484 - .set_affinity = ir_set_msi_irq_affinity, 3676 + .irq_set_affinity = ir_msi_set_affinity, 3485 3677 #endif 3486 3678 #endif 3487 - .retrigger = ioapic_retrigger_irq, 3679 + .irq_retrigger = ioapic_retrigger_irq, 3488 3680 }; 3489 3681 3490 3682 static struct irq_chip hpet_msi_type = { 3491 3683 .name = "HPET_MSI", 3492 - .unmask = hpet_msi_unmask, 3493 - .mask = hpet_msi_mask, 3494 - .ack = ack_apic_edge, 3684 + .irq_unmask = hpet_msi_unmask, 3685 + .irq_mask = hpet_msi_mask, 3686 + .irq_ack = ack_apic_edge, 3495 3687 #ifdef CONFIG_SMP 3496 - .set_affinity = hpet_msi_set_affinity, 3688 + .irq_set_affinity = hpet_msi_set_affinity, 3497 3689 #endif 3498 - .retrigger = ioapic_retrigger_irq, 3690 + .irq_retrigger = ioapic_retrigger_irq, 3499 3691 }; 3500 3692 3501 3693 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3502 3694 { 3503 - int ret; 3504 3695 struct msi_msg msg; 3505 - struct irq_desc *desc = irq_to_desc(irq); 3696 + int ret; 3506 3697 3507 3698 if (intr_remapping_enabled) { 3508 3699 struct intel_iommu *iommu = map_hpet_to_ir(id); ··· 3519 3712 if (ret < 0) 3520 3713 return ret; 3521 3714 3522 - hpet_msi_write(irq, &msg); 3523 - desc->status |= IRQ_MOVE_PCNTXT; 3524 - if (irq_remapped(irq)) 3715 + hpet_msi_write(get_irq_data(irq), &msg); 3716 + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3717 + if (irq_remapped(get_irq_chip_data(irq))) 3525 3718 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, 3526 3719 handle_edge_irq, "edge"); 3527 3720 else ··· 3554 3747 write_ht_irq_msg(irq, &msg); 3555 3748 } 3556 3749 3557 - static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) 3750 + static int 3751 + 
ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3558 3752 { 3559 - struct irq_desc *desc = irq_to_desc(irq); 3560 - struct irq_cfg *cfg; 3753 + struct irq_cfg *cfg = data->chip_data; 3561 3754 unsigned int dest; 3562 3755 3563 - if (set_desc_affinity(desc, mask, &dest)) 3756 + if (__ioapic_set_affinity(data, mask, &dest)) 3564 3757 return -1; 3565 3758 3566 - cfg = desc->chip_data; 3567 - 3568 - target_ht_irq(irq, dest, cfg->vector); 3569 - 3759 + target_ht_irq(data->irq, dest, cfg->vector); 3570 3760 return 0; 3571 3761 } 3572 3762 3573 3763 #endif 3574 3764 3575 3765 static struct irq_chip ht_irq_chip = { 3576 - .name = "PCI-HT", 3577 - .mask = mask_ht_irq, 3578 - .unmask = unmask_ht_irq, 3579 - .ack = ack_apic_edge, 3766 + .name = "PCI-HT", 3767 + .irq_mask = mask_ht_irq, 3768 + .irq_unmask = unmask_ht_irq, 3769 + .irq_ack = ack_apic_edge, 3580 3770 #ifdef CONFIG_SMP 3581 - .set_affinity = set_ht_irq_affinity, 3771 + .irq_set_affinity = ht_set_affinity, 3582 3772 #endif 3583 - .retrigger = ioapic_retrigger_irq, 3773 + .irq_retrigger = ioapic_retrigger_irq, 3584 3774 }; 3585 3775 3586 3776 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) ··· 3668 3864 if (nr < nr_irqs) 3669 3865 nr_irqs = nr; 3670 3866 3671 - return 0; 3867 + return NR_IRQS_LEGACY; 3672 3868 } 3673 3869 #endif 3674 3870 3675 3871 static int __io_apic_set_pci_routing(struct device *dev, int irq, 3676 3872 struct io_apic_irq_attr *irq_attr) 3677 3873 { 3678 - struct irq_desc *desc; 3679 3874 struct irq_cfg *cfg; 3680 3875 int node; 3681 3876 int ioapic, pin; ··· 3692 3889 else 3693 3890 node = cpu_to_node(0); 3694 3891 3695 - desc = irq_to_desc_alloc_node(irq, node); 3696 - if (!desc) { 3697 - printk(KERN_INFO "can not get irq_desc %d\n", irq); 3892 + cfg = alloc_irq_and_cfg_at(irq, node); 3893 + if (!cfg) 3698 3894 return 0; 3699 - } 3700 3895 3701 3896 pin = irq_attr->ioapic_pin; 3702 3897 trigger = irq_attr->trigger; ··· 3704 3903 * IRQs < 16 are already in the irq_2_pin[] map 3705 3904 */ 3706 3905 if (irq >= legacy_pic->nr_legacy_irqs) { 3707 - cfg = desc->chip_data; 3708 - if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) { 3906 + if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { 3709 3907 printk(KERN_INFO "can not add pin %d for irq %d\n", 3710 3908 pin, irq); 3711 3909 return 0; 3712 3910 } 3713 3911 } 3714 3912 3715 - setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); 3913 + setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); 3716 3914 3717 3915 return 0; 3718 3916 } ··· 3904 4104 */ 3905 4105 if (desc->status & 3906 4106 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 3907 - mask = desc->affinity; 4107 + mask = desc->irq_data.affinity; 3908 4108 else 3909 4109 mask = apic->target_cpus(); 3910 4110 3911 4111 if (intr_remapping_enabled) 3912 - set_ir_ioapic_affinity_irq_desc(desc, mask); 4112 + ir_ioapic_set_affinity(&desc->irq_data, mask, false); 3913 4113 else 3914 - set_ioapic_affinity_irq_desc(desc, mask); 4114 + ioapic_set_affinity(&desc->irq_data, mask, false); 3915 4115 } 3916 4116 3917 4117 } ··· 4095 4295 void __init pre_init_apic_IRQ0(void) 4096 4296 { 4097 4297 struct irq_cfg *cfg; 4098 - struct irq_desc *desc; 4099 4298 4100 4299 printk(KERN_INFO "Early APIC setup for system timer0\n"); 4101 4300 #ifndef CONFIG_SMP 4102 4301 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 4103 4302 #endif 4104 - desc = irq_to_desc_alloc_node(0, 0); 4303 + /* Make sure the irq descriptor is set up */ 4304 + cfg = 
alloc_irq_and_cfg_at(0, 0); 4105 4305 4106 4306 setup_local_APIC(); 4107 4307 4108 - cfg = irq_cfg(0); 4109 4308 add_pin_to_irq_node(cfg, 0, 0, 0); 4110 4309 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); 4111 4310 4112 - setup_IO_APIC_irq(0, 0, 0, desc, 0, 0); 4311 + setup_ioapic_irq(0, 0, 0, cfg, 0, 0); 4113 4312 }
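The io_apic.c conversion above is the template for the whole series: every irq_chip callback switches from taking a bare irq number to taking a struct irq_data, so per-interrupt state (chip_data, msi_desc, affinity) arrives with the call instead of being fetched through irq_to_desc(). A minimal sketch of the pattern, with invented demo_* names and a hypothetical write-one-to-mask register pair, assuming the 2.6.37-era <linux/irq.h> API:

        #include <linux/irq.h>
        #include <linux/io.h>

        struct demo_pic {
                void __iomem *mask_set;   /* hypothetical: write 1<<irq to mask */
                void __iomem *mask_clr;   /* hypothetical: write 1<<irq to unmask */
        };

        static void demo_irq_mask(struct irq_data *data)
        {
                struct demo_pic *pic = data->chip_data; /* no irq_to_desc() lookup */

                writel(1U << (data->irq & 31), pic->mask_set);
        }

        static void demo_irq_unmask(struct irq_data *data)
        {
                struct demo_pic *pic = data->chip_data;

                writel(1U << (data->irq & 31), pic->mask_clr);
        }

        static struct irq_chip demo_chip = {
                .name           = "demo",
                .irq_mask       = demo_irq_mask,
                .irq_unmask     = demo_irq_unmask,
        };

Wiring is then one call per irq: set_irq_chip_data(irq, pic) followed by set_irq_chip_and_handler_name(irq, &demo_chip, handle_level_irq, "level"), which is exactly the shape the converted x86 chips above take.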
+1 -1
arch/x86/kernel/apic/nmi.c
··· 178 178 error: 179 179 if (nmi_watchdog == NMI_IO_APIC) { 180 180 if (!timer_through_8259) 181 - legacy_pic->chip->mask(0); 181 + legacy_pic->mask(0); 182 182 on_each_cpu(__acpi_nmi_disable, NULL, 1); 183 183 } 184 184
+24 -3
arch/x86/kernel/cpu/mcheck/mce_amd.c
··· 131 131 u32 low = 0, high = 0, address = 0; 132 132 unsigned int bank, block; 133 133 struct thresh_restart tr; 134 - u8 lvt_off; 134 + int lvt_off = -1; 135 + u8 offset; 135 136 136 137 for (bank = 0; bank < NR_BANKS; ++bank) { 137 138 for (block = 0; block < NR_BLOCKS; ++block) { ··· 163 162 if (shared_bank[bank] && c->cpu_core_id) 164 163 break; 165 164 #endif 166 - lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR, 167 - APIC_EILVT_MSG_FIX, 0); 165 + offset = (high & MASK_LVTOFF_HI) >> 20; 166 + if (lvt_off < 0) { 167 + if (setup_APIC_eilvt(offset, 168 + THRESHOLD_APIC_VECTOR, 169 + APIC_EILVT_MSG_FIX, 0)) { 170 + pr_err(FW_BUG "cpu %d, failed to " 171 + "setup threshold interrupt " 172 + "for bank %d, block %d " 173 + "(MSR%08X=0x%x%08x)", 174 + smp_processor_id(), bank, block, 175 + address, high, low); 176 + continue; 177 + } 178 + lvt_off = offset; 179 + } else if (lvt_off != offset) { 180 + pr_err(FW_BUG "cpu %d, invalid threshold " 181 + "interrupt offset %d for bank %d," 182 + "block %d (MSR%08X=0x%x%08x)", 183 + smp_processor_id(), lvt_off, bank, 184 + block, address, high, low); 185 + continue; 186 + } 168 187 169 188 high &= ~MASK_LVTOFF_HI; 170 189 high |= lvt_off << 20;
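The mce_amd.c hunk stops allocating a threshold LVT offset itself and instead adopts the BIOS-programmed one (bits 23:20 of the bank MSR's high word), insisting that every bank/block agrees before touching the APIC. A sketch of that first-block-wins check; the mask value is copied from the driver's MASK_LVTOFF_HI and the demo_ prefix marks the helper as illustration only:

        #include <linux/errno.h>
        #include <linux/types.h>

        #define DEMO_MASK_LVTOFF_HI     0x00F00000      /* LVT offset, MSR high bits 23:20 */

        /* returns the adopted offset, or -EINVAL on a BIOS disagreement */
        static int demo_pick_lvt_offset(u32 high, int cur)
        {
                int offset = (high & DEMO_MASK_LVTOFF_HI) >> 20;

                if (cur < 0)            /* first block seen: trust the BIOS value */
                        return offset;

                return (cur == offset) ? cur : -EINVAL; /* caller logs FW_BUG */
        }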
+6 -10
arch/x86/kernel/hpet.c
··· 440 440 static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev); 441 441 static struct hpet_dev *hpet_devs; 442 442 443 - void hpet_msi_unmask(unsigned int irq) 443 + void hpet_msi_unmask(struct irq_data *data) 444 444 { 445 - struct hpet_dev *hdev = get_irq_data(irq); 445 + struct hpet_dev *hdev = data->handler_data; 446 446 unsigned int cfg; 447 447 448 448 /* unmask it */ ··· 451 451 hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); 452 452 } 453 453 454 - void hpet_msi_mask(unsigned int irq) 454 + void hpet_msi_mask(struct irq_data *data) 455 455 { 456 + struct hpet_dev *hdev = data->handler_data; 456 457 unsigned int cfg; 457 - struct hpet_dev *hdev = get_irq_data(irq); 458 458 459 459 /* mask it */ 460 460 cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); ··· 462 462 hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); 463 463 } 464 464 465 - void hpet_msi_write(unsigned int irq, struct msi_msg *msg) 465 + void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg) 466 466 { 467 - struct hpet_dev *hdev = get_irq_data(irq); 468 - 469 467 hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num)); 470 468 hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4); 471 469 } 472 470 473 - void hpet_msi_read(unsigned int irq, struct msi_msg *msg) 471 + void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg) 474 472 { 475 - struct hpet_dev *hdev = get_irq_data(irq); 476 - 477 473 msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num)); 478 474 msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4); 479 475 msg->address_hi = 0;
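hpet_msi_read()/hpet_msi_write() now take the struct hpet_dev directly, and the mask/unmask callbacks recover it from irq_data->handler_data, so the per-call get_irq_data(irq) round-trips disappear. A sketch of the resulting retarget flow, assuming the hpet_dev was attached as the irq's handler data at setup time (as the arch/x86 code above does) and using the standard msidef.h field macros:

        #include <linux/msi.h>
        #include <asm/msidef.h>
        #include <asm/hpet.h>

        static int demo_hpet_retarget(struct irq_data *data, unsigned int vector,
                                      unsigned int dest)
        {
                struct hpet_dev *hdev = data->handler_data;     /* set at irq setup */
                struct msi_msg msg;

                hpet_msi_read(hdev, &msg);      /* device handle, not irq number */
                msg.data &= ~MSI_DATA_VECTOR_MASK;
                msg.data |= MSI_DATA_VECTOR(vector);
                msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
                msg.address_lo |= MSI_ADDR_DEST_ID(dest);
                hpet_msi_write(hdev, &msg);

                return 0;
        }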
+32 -31
arch/x86/kernel/i8259.c
··· 29 29 * plus some generic x86 specific things if generic specifics makes 30 30 * any sense at all. 31 31 */ 32 + static void init_8259A(int auto_eoi); 32 33 33 34 static int i8259A_auto_eoi; 34 35 DEFINE_RAW_SPINLOCK(i8259A_lock); 35 - static void mask_and_ack_8259A(unsigned int); 36 - static void mask_8259A(void); 37 - static void unmask_8259A(void); 38 - static void disable_8259A_irq(unsigned int irq); 39 - static void enable_8259A_irq(unsigned int irq); 40 - static void init_8259A(int auto_eoi); 41 - static int i8259A_irq_pending(unsigned int irq); 42 - 43 - struct irq_chip i8259A_chip = { 44 - .name = "XT-PIC", 45 - .mask = disable_8259A_irq, 46 - .disable = disable_8259A_irq, 47 - .unmask = enable_8259A_irq, 48 - .mask_ack = mask_and_ack_8259A, 49 - }; 50 36 51 37 /* 52 38 * 8259A PIC functions to handle ISA devices: ··· 54 68 */ 55 69 unsigned long io_apic_irqs; 56 70 57 - static void disable_8259A_irq(unsigned int irq) 71 + static void mask_8259A_irq(unsigned int irq) 58 72 { 59 73 unsigned int mask = 1 << irq; 60 74 unsigned long flags; ··· 68 82 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 69 83 } 70 84 71 - static void enable_8259A_irq(unsigned int irq) 85 + static void disable_8259A_irq(struct irq_data *data) 86 + { 87 + mask_8259A_irq(data->irq); 88 + } 89 + 90 + static void unmask_8259A_irq(unsigned int irq) 72 91 { 73 92 unsigned int mask = ~(1 << irq); 74 93 unsigned long flags; ··· 85 94 else 86 95 outb(cached_master_mask, PIC_MASTER_IMR); 87 96 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 97 + } 98 + 99 + static void enable_8259A_irq(struct irq_data *data) 100 + { 101 + unmask_8259A_irq(data->irq); 88 102 } 89 103 90 104 static int i8259A_irq_pending(unsigned int irq) ··· 113 117 disable_irq_nosync(irq); 114 118 io_apic_irqs &= ~(1<<irq); 115 119 set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, 116 - "XT"); 120 + i8259A_chip.name); 117 121 enable_irq(irq); 118 122 } 119 123 ··· 146 150 * first, _then_ send the EOI, and the order of EOI 147 151 * to the two 8259s is important! 148 152 */ 149 - static void mask_and_ack_8259A(unsigned int irq) 153 + static void mask_and_ack_8259A(struct irq_data *data) 150 154 { 155 + unsigned int irq = data->irq; 151 156 unsigned int irqmask = 1 << irq; 152 157 unsigned long flags; 153 158 ··· 219 222 goto handle_real_irq; 220 223 } 221 224 } 225 + 226 + struct irq_chip i8259A_chip = { 227 + .name = "XT-PIC", 228 + .irq_mask = disable_8259A_irq, 229 + .irq_disable = disable_8259A_irq, 230 + .irq_unmask = enable_8259A_irq, 231 + .irq_mask_ack = mask_and_ack_8259A, 232 + }; 222 233 223 234 static char irq_trigger[2]; 224 235 /** ··· 347 342 * In AEOI mode we just have to mask the interrupt 348 343 * when acking. 
349 344 */ 350 - i8259A_chip.mask_ack = disable_8259A_irq; 345 + i8259A_chip.irq_mask_ack = disable_8259A_irq; 351 346 else 352 - i8259A_chip.mask_ack = mask_and_ack_8259A; 347 + i8259A_chip.irq_mask_ack = mask_and_ack_8259A; 353 348 354 349 udelay(100); /* wait for 8259A to initialize */ 355 350 ··· 368 363 static void legacy_pic_noop(void) { }; 369 364 static void legacy_pic_uint_noop(unsigned int unused) { }; 370 365 static void legacy_pic_int_noop(int unused) { }; 371 - 372 - static struct irq_chip dummy_pic_chip = { 373 - .name = "dummy pic", 374 - .mask = legacy_pic_uint_noop, 375 - .unmask = legacy_pic_uint_noop, 376 - .disable = legacy_pic_uint_noop, 377 - .mask_ack = legacy_pic_uint_noop, 378 - }; 379 366 static int legacy_pic_irq_pending_noop(unsigned int irq) 380 367 { 381 368 return 0; ··· 375 378 376 379 struct legacy_pic null_legacy_pic = { 377 380 .nr_legacy_irqs = 0, 378 - .chip = &dummy_pic_chip, 381 + .chip = &dummy_irq_chip, 382 + .mask = legacy_pic_uint_noop, 383 + .unmask = legacy_pic_uint_noop, 379 384 .mask_all = legacy_pic_noop, 380 385 .restore_mask = legacy_pic_noop, 381 386 .init = legacy_pic_int_noop, ··· 388 389 struct legacy_pic default_legacy_pic = { 389 390 .nr_legacy_irqs = NR_IRQS_LEGACY, 390 391 .chip = &i8259A_chip, 391 - .mask_all = mask_8259A, 392 + .mask = mask_8259A_irq, 393 + .unmask = unmask_8259A_irq, 394 + .mask_all = mask_8259A, 392 395 .restore_mask = unmask_8259A, 393 396 .init = init_8259A, 394 397 .irq_pending = i8259A_irq_pending,
+13 -11
arch/x86/kernel/irq.c
··· 159 159 seq_printf(p, "%*d: ", prec, i); 160 160 for_each_online_cpu(j) 161 161 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 162 - seq_printf(p, " %8s", desc->chip->name); 162 + seq_printf(p, " %8s", desc->irq_data.chip->name); 163 163 seq_printf(p, "-%-8s", desc->name); 164 164 165 165 if (action) { ··· 282 282 unsigned int irq, vector; 283 283 static int warned; 284 284 struct irq_desc *desc; 285 + struct irq_data *data; 285 286 286 287 for_each_irq_desc(irq, desc) { 287 288 int break_affinity = 0; ··· 297 296 /* interrupt's are disabled at this point */ 298 297 raw_spin_lock(&desc->lock); 299 298 300 - affinity = desc->affinity; 299 + data = &desc->irq_data; 300 + affinity = data->affinity; 301 301 if (!irq_has_action(irq) || 302 302 cpumask_equal(affinity, cpu_online_mask)) { 303 303 raw_spin_unlock(&desc->lock); ··· 317 315 affinity = cpu_all_mask; 318 316 } 319 317 320 - if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) 321 - desc->chip->mask(irq); 318 + if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask) 319 + data->chip->irq_mask(data); 322 320 323 - if (desc->chip->set_affinity) 324 - desc->chip->set_affinity(irq, affinity); 321 + if (data->chip->irq_set_affinity) 322 + data->chip->irq_set_affinity(data, affinity, true); 325 323 else if (!(warned++)) 326 324 set_affinity = 0; 327 325 328 - if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) 329 - desc->chip->unmask(irq); 326 + if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask) 327 + data->chip->irq_unmask(data); 330 328 331 329 raw_spin_unlock(&desc->lock); 332 330 ··· 357 355 if (irr & (1 << (vector % 32))) { 358 356 irq = __get_cpu_var(vector_irq)[vector]; 359 357 360 - desc = irq_to_desc(irq); 358 + data = irq_get_irq_data(irq); 361 359 raw_spin_lock(&desc->lock); 362 - if (desc->chip->retrigger) 363 - desc->chip->retrigger(irq); 360 + if (data->chip->irq_retrigger) 361 + data->chip->irq_retrigger(data); 364 362 raw_spin_unlock(&desc->lock); 365 363 } 366 364 }
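fixup_irqs() above now fetches &desc->irq_data once and feeds it to every chip method, which is also the generic access pattern outside the descriptor-walking paths. A minimal sketch built on irq_get_irq_data(), the same lookup the retrigger loop uses:

        #include <linux/irq.h>

        static void demo_retrigger(unsigned int irq)
        {
                struct irq_data *data = irq_get_irq_data(irq);

                if (data && data->chip->irq_retrigger)
                        data->chip->irq_retrigger(data);        /* replay a lost edge */
        }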
+4 -13
arch/x86/kernel/irqinit.c
··· 100 100 101 101 void __init init_ISA_irqs(void) 102 102 { 103 + struct irq_chip *chip = legacy_pic->chip; 104 + const char *name = chip->name; 103 105 int i; 104 106 105 107 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) ··· 109 107 #endif 110 108 legacy_pic->init(0); 111 109 112 - /* 113 - * 16 old-style INTA-cycle interrupts: 114 - */ 115 - for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) { 116 - struct irq_desc *desc = irq_to_desc(i); 117 - 118 - desc->status = IRQ_DISABLED; 119 - desc->action = NULL; 120 - desc->depth = 1; 121 - 122 - set_irq_chip_and_handler_name(i, &i8259A_chip, 123 - handle_level_irq, "XT"); 124 - } 110 + for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) 111 + set_irq_chip_and_handler_name(i, chip, handle_level_irq, name); 125 112 } 126 113 127 114 void __init init_IRQ(void)
+2 -2
arch/x86/kernel/smpboot.c
··· 323 323 check_tsc_sync_target(); 324 324 325 325 if (nmi_watchdog == NMI_IO_APIC) { 326 - legacy_pic->chip->mask(0); 326 + legacy_pic->mask(0); 327 327 enable_NMI_through_LVT0(); 328 - legacy_pic->chip->unmask(0); 328 + legacy_pic->unmask(0); 329 329 } 330 330 331 331 /* This must be done before setting cpu_online_mask */
+19 -36
arch/x86/kernel/uv_irq.c
··· 28 28 static spinlock_t uv_irq_lock; 29 29 static struct rb_root uv_irq_root; 30 30 31 - static int uv_set_irq_affinity(unsigned int, const struct cpumask *); 31 + static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool); 32 32 33 - static void uv_noop(unsigned int irq) 34 - { 35 - } 33 + static void uv_noop(struct irq_data *data) { } 36 34 37 - static unsigned int uv_noop_ret(unsigned int irq) 38 - { 39 - return 0; 40 - } 41 - 42 - static void uv_ack_apic(unsigned int irq) 35 + static void uv_ack_apic(struct irq_data *data) 43 36 { 44 37 ack_APIC_irq(); 45 38 } 46 39 47 40 static struct irq_chip uv_irq_chip = { 48 - .name = "UV-CORE", 49 - .startup = uv_noop_ret, 50 - .shutdown = uv_noop, 51 - .enable = uv_noop, 52 - .disable = uv_noop, 53 - .ack = uv_noop, 54 - .mask = uv_noop, 55 - .unmask = uv_noop, 56 - .eoi = uv_ack_apic, 57 - .end = uv_noop, 58 - .set_affinity = uv_set_irq_affinity, 41 + .name = "UV-CORE", 42 + .irq_mask = uv_noop, 43 + .irq_unmask = uv_noop, 44 + .irq_eoi = uv_ack_apic, 45 + .irq_set_affinity = uv_set_irq_affinity, 59 46 }; 60 47 61 48 /* ··· 131 144 unsigned long mmr_offset, int limit) 132 145 { 133 146 const struct cpumask *eligible_cpu = cpumask_of(cpu); 134 - struct irq_desc *desc = irq_to_desc(irq); 135 - struct irq_cfg *cfg; 136 - int mmr_pnode; 147 + struct irq_cfg *cfg = get_irq_chip_data(irq); 137 148 unsigned long mmr_value; 138 149 struct uv_IO_APIC_route_entry *entry; 139 - int err; 150 + int mmr_pnode, err; 140 151 141 152 BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != 142 153 sizeof(unsigned long)); 143 - 144 - cfg = irq_cfg(irq); 145 154 146 155 err = assign_irq_vector(irq, cfg, eligible_cpu); 147 156 if (err != 0) 148 157 return err; 149 158 150 159 if (limit == UV_AFFINITY_CPU) 151 - desc->status |= IRQ_NO_BALANCING; 160 + irq_set_status_flags(irq, IRQ_NO_BALANCING); 152 161 else 153 - desc->status |= IRQ_MOVE_PCNTXT; 162 + irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 154 163 155 164 set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, 156 165 irq_name); ··· 189 206 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); 190 207 } 191 208 192 - static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) 209 + static int 210 + uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, 211 + bool force) 193 212 { 194 - struct irq_desc *desc = irq_to_desc(irq); 195 - struct irq_cfg *cfg = desc->chip_data; 213 + struct irq_cfg *cfg = data->chip_data; 196 214 unsigned int dest; 197 - unsigned long mmr_value; 215 + unsigned long mmr_value, mmr_offset; 198 216 struct uv_IO_APIC_route_entry *entry; 199 - unsigned long mmr_offset; 200 217 int mmr_pnode; 201 218 202 - if (set_desc_affinity(desc, mask, &dest)) 219 + if (__ioapic_set_affinity(data, mask, &dest)) 203 220 return -1; 204 221 205 222 mmr_value = 0; ··· 214 231 entry->dest = dest; 215 232 216 233 /* Get previously stored MMR and pnode of hub sourcing interrupts */ 217 - if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) 234 + if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode)) 218 235 return -1; 219 236 220 237 uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
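uv_irq.c also drops its pile of noop startup/shutdown/enable/disable/end callbacks; with the reworked core a chip only populates the methods the hardware actually needs. A sketch of the trimmed-down shape (demo_ names are invented; the EOI goes through the local APIC just as uv_ack_apic does, and keeping mask/unmask as explicit noops mirrors the UV chip above):

        #include <linux/irq.h>
        #include <asm/apic.h>

        static void demo_noop(struct irq_data *data) { }

        static void demo_eoi(struct irq_data *data)
        {
                ack_APIC_irq();
        }

        static struct irq_chip demo_percpu_chip = {
                .name           = "demo-percpu",
                .irq_mask       = demo_noop,    /* nothing to mask in hardware */
                .irq_unmask     = demo_noop,
                .irq_eoi        = demo_eoi,
        };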
+42 -94
arch/x86/kernel/visws_quirks.c
··· 66 66 } 67 67 68 68 /* Replaces the default init_ISA_irqs in the generic setup */ 69 - static void __init visws_pre_intr_init(void) 70 - { 71 - init_VISWS_APIC_irqs(); 72 - } 69 + static void __init visws_pre_intr_init(void); 73 70 74 71 /* Quirk for machine specific memory setup. */ 75 72 ··· 426 429 /* 427 430 * This is the SGI Cobalt (IO-)APIC: 428 431 */ 429 - 430 - static void enable_cobalt_irq(unsigned int irq) 432 + static void enable_cobalt_irq(struct irq_data *data) 431 433 { 432 - co_apic_set(is_co_apic(irq), irq); 434 + co_apic_set(is_co_apic(data->irq), data->irq); 433 435 } 434 436 435 - static void disable_cobalt_irq(unsigned int irq) 437 + static void disable_cobalt_irq(struct irq_data *data) 436 438 { 437 - int entry = is_co_apic(irq); 439 + int entry = is_co_apic(data->irq); 438 440 439 441 co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK); 440 442 co_apic_read(CO_APIC_LO(entry)); 441 443 } 442 444 443 - /* 444 - * "irq" really just serves to identify the device. Here is where we 445 - * map this to the Cobalt APIC entry where it's physically wired. 446 - * This is called via request_irq -> setup_irq -> irq_desc->startup() 447 - */ 448 - static unsigned int startup_cobalt_irq(unsigned int irq) 449 - { 450 - unsigned long flags; 451 - struct irq_desc *desc = irq_to_desc(irq); 452 - 453 - spin_lock_irqsave(&cobalt_lock, flags); 454 - if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING))) 455 - desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING); 456 - enable_cobalt_irq(irq); 457 - spin_unlock_irqrestore(&cobalt_lock, flags); 458 - return 0; 459 - } 460 - 461 - static void ack_cobalt_irq(unsigned int irq) 445 + static void ack_cobalt_irq(struct irq_data *data) 462 446 { 463 447 unsigned long flags; 464 448 465 449 spin_lock_irqsave(&cobalt_lock, flags); 466 - disable_cobalt_irq(irq); 450 + disable_cobalt_irq(data); 467 451 apic_write(APIC_EOI, APIC_EIO_ACK); 468 452 spin_unlock_irqrestore(&cobalt_lock, flags); 469 453 } 470 454 471 - static void end_cobalt_irq(unsigned int irq) 472 - { 473 - unsigned long flags; 474 - struct irq_desc *desc = irq_to_desc(irq); 475 - 476 - spin_lock_irqsave(&cobalt_lock, flags); 477 - if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) 478 - enable_cobalt_irq(irq); 479 - spin_unlock_irqrestore(&cobalt_lock, flags); 480 - } 481 - 482 455 static struct irq_chip cobalt_irq_type = { 483 - .name = "Cobalt-APIC", 484 - .startup = startup_cobalt_irq, 485 - .shutdown = disable_cobalt_irq, 486 - .enable = enable_cobalt_irq, 487 - .disable = disable_cobalt_irq, 488 - .ack = ack_cobalt_irq, 489 - .end = end_cobalt_irq, 456 + .name = "Cobalt-APIC", 457 + .irq_enable = enable_cobalt_irq, 458 + .irq_disable = disable_cobalt_irq, 459 + .irq_ack = ack_cobalt_irq, 490 460 }; 491 461 492 462 ··· 467 503 * interrupt controller type, and through a special virtual interrupt- 468 504 * controller. Device drivers only see the virtual interrupt sources. 
469 505 */ 470 - static unsigned int startup_piix4_master_irq(unsigned int irq) 506 + static unsigned int startup_piix4_master_irq(struct irq_data *data) 471 507 { 472 508 legacy_pic->init(0); 473 - 474 - return startup_cobalt_irq(irq); 509 + enable_cobalt_irq(data); 475 510 } 476 511 477 - static void end_piix4_master_irq(unsigned int irq) 512 + static void end_piix4_master_irq(struct irq_data *data) 478 513 { 479 514 unsigned long flags; 480 515 481 516 spin_lock_irqsave(&cobalt_lock, flags); 482 - enable_cobalt_irq(irq); 517 + enable_cobalt_irq(data); 483 518 spin_unlock_irqrestore(&cobalt_lock, flags); 484 519 } 485 520 486 521 static struct irq_chip piix4_master_irq_type = { 487 - .name = "PIIX4-master", 488 - .startup = startup_piix4_master_irq, 489 - .ack = ack_cobalt_irq, 490 - .end = end_piix4_master_irq, 522 + .name = "PIIX4-master", 523 + .irq_startup = startup_piix4_master_irq, 524 + .irq_ack = ack_cobalt_irq, 491 525 }; 492 526 527 + static void pii4_mask(struct irq_data *data) { } 493 528 494 529 static struct irq_chip piix4_virtual_irq_type = { 495 - .name = "PIIX4-virtual", 530 + .name = "PIIX4-virtual", 531 + .mask = pii4_mask, 496 532 }; 497 - 498 533 499 534 /* 500 535 * PIIX4-8259 master/virtual functions to handle interrupt requests ··· 512 549 */ 513 550 static irqreturn_t piix4_master_intr(int irq, void *dev_id) 514 551 { 515 - int realirq; 516 - struct irq_desc *desc; 517 552 unsigned long flags; 553 + int realirq; 518 554 519 555 raw_spin_lock_irqsave(&i8259A_lock, flags); 520 556 ··· 554 592 555 593 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 556 594 557 - desc = irq_to_desc(realirq); 558 - 559 595 /* 560 596 * handle this 'virtual interrupt' as a Cobalt one now. 561 597 */ 562 - kstat_incr_irqs_this_cpu(realirq, desc); 563 - 564 - if (likely(desc->action != NULL)) 565 - handle_IRQ_event(realirq, desc->action); 566 - 567 - if (!(desc->status & IRQ_DISABLED)) 568 - legacy_pic->chip->unmask(realirq); 598 + generic_handle_irq(realirq); 569 599 570 600 return IRQ_HANDLED; 571 601 ··· 578 624 579 625 static inline void set_piix4_virtual_irq_type(void) 580 626 { 581 - piix4_virtual_irq_type.shutdown = i8259A_chip.mask; 582 627 piix4_virtual_irq_type.enable = i8259A_chip.unmask; 583 628 piix4_virtual_irq_type.disable = i8259A_chip.mask; 629 + piix4_virtual_irq_type.unmask = i8259A_chip.unmask; 584 630 } 585 631 586 - void init_VISWS_APIC_irqs(void) 632 + static void __init visws_pre_intr_init(void) 587 633 { 588 634 int i; 589 635 636 + set_piix4_virtual_irq_type(); 637 + 590 638 for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) { 591 - struct irq_desc *desc = irq_to_desc(i); 639 + struct irq_chip *chip = NULL; 592 640 593 - desc->status = IRQ_DISABLED; 594 - desc->action = 0; 595 - desc->depth = 1; 641 + if (i == 0) 642 + chip = &cobalt_irq_type; 643 + else if (i == CO_IRQ_IDE0) 644 + chip = &cobalt_irq_type; 645 + else if (i == CO_IRQ_IDE1) 646 + >chip = &cobalt_irq_type; 647 + else if (i == CO_IRQ_8259) 648 + chip = &piix4_master_irq_type; 649 + else if (i < CO_IRQ_APIC0) 650 + chip = &piix4_virtual_irq_type; 651 + else if (IS_CO_APIC(i)) 652 + chip = &cobalt_irq_type; 596 653 597 - if (i == 0) { 598 - desc->chip = &cobalt_irq_type; 599 - } 600 - else if (i == CO_IRQ_IDE0) { 601 - desc->chip = &cobalt_irq_type; 602 - } 603 - else if (i == CO_IRQ_IDE1) { 604 - desc->chip = &cobalt_irq_type; 605 - } 606 - else if (i == CO_IRQ_8259) { 607 - desc->chip = &piix4_master_irq_type; 608 - } 609 - else if (i < CO_IRQ_APIC0) { 610 - set_piix4_virtual_irq_type(); 611 - 
desc->chip = &piix4_virtual_irq_type; 612 - } 613 - else if (IS_CO_APIC(i)) { 614 - desc->chip = &cobalt_irq_type; 615 - } 654 + if (chip) 655 + set_irq_chip(i, chip); 616 656 } 617 657 618 658 setup_irq(CO_IRQ_8259, &master_action);
+9 -9
arch/x86/lguest/boot.c
··· 791 791 * simple as setting a bit. We don't actually "ack" interrupts as such, we 792 792 * just mask and unmask them. I wonder if we should be cleverer? 793 793 */ 794 - static void disable_lguest_irq(unsigned int irq) 794 + static void disable_lguest_irq(struct irq_data *data) 795 795 { 796 - set_bit(irq, lguest_data.blocked_interrupts); 796 + set_bit(data->irq, lguest_data.blocked_interrupts); 797 797 } 798 798 799 - static void enable_lguest_irq(unsigned int irq) 799 + static void enable_lguest_irq(struct irq_data *data) 800 800 { 801 - clear_bit(irq, lguest_data.blocked_interrupts); 801 + clear_bit(data->irq, lguest_data.blocked_interrupts); 802 802 } 803 803 804 804 /* This structure describes the lguest IRQ controller. */ 805 805 static struct irq_chip lguest_irq_controller = { 806 806 .name = "lguest", 807 - .mask = disable_lguest_irq, 808 - .mask_ack = disable_lguest_irq, 809 - .unmask = enable_lguest_irq, 807 + .irq_mask = disable_lguest_irq, 808 + .irq_mask_ack = disable_lguest_irq, 809 + .irq_unmask = enable_lguest_irq, 810 810 }; 811 811 812 812 /* ··· 838 838 * rather than set them in lguest_init_IRQ we are called here every time an 839 839 * lguest device needs an interrupt. 840 840 * 841 - * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should 841 + * FIXME: irq_alloc_desc_at() can fail due to lack of memory, we should 842 842 * pass that up! 843 843 */ 844 844 void lguest_setup_irq(unsigned int irq) 845 845 { 846 - irq_to_desc_alloc_node(irq, 0); 846 + irq_alloc_desc_at(irq, 0); 847 847 set_irq_chip_and_handler_name(irq, &lguest_irq_controller, 848 848 handle_level_irq, "level"); 849 849 }
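The lguest conversion shows how small a paravirt chip gets under the new model: mask and unmask are single bit flips keyed off data->irq, and descriptor allocation moves to irq_alloc_desc_at(). A sketch of a guest driver claiming such an irq; the demo_ pieces are invented, while lguest_setup_irq is the helper above:

        #include <linux/interrupt.h>

        static irqreturn_t demo_dev_intr(int irq, void *dev_id)
        {
                /* real device work elided */
                return IRQ_HANDLED;
        }

        static int demo_bind_guest_irq(unsigned int irq, void *dev)
        {
                lguest_setup_irq(irq);  /* alloc desc, bind chip + handle_level_irq */
                return request_irq(irq, demo_dev_intr, 0, "demo", dev);
        }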
+127 -18
arch/x86/oprofile/op_model_amd.c
··· 64 64 * IBS cpuid feature detection 65 65 */ 66 66 67 - #define IBS_CPUID_FEATURES 0x8000001b 67 + #define IBS_CPUID_FEATURES 0x8000001b 68 68 69 69 /* 70 70 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but 71 71 * bit 0 is used to indicate the existence of IBS. 72 72 */ 73 - #define IBS_CAPS_AVAIL (1LL<<0) 74 - #define IBS_CAPS_RDWROPCNT (1LL<<3) 75 - #define IBS_CAPS_OPCNT (1LL<<4) 73 + #define IBS_CAPS_AVAIL (1U<<0) 74 + #define IBS_CAPS_RDWROPCNT (1U<<3) 75 + #define IBS_CAPS_OPCNT (1U<<4) 76 + 77 + /* 78 + * IBS APIC setup 79 + */ 80 + #define IBSCTL 0x1cc 81 + #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) 82 + #define IBSCTL_LVT_OFFSET_MASK 0x0F 76 83 77 84 /* 78 85 * IBS randomization macros ··· 273 266 wrmsrl(MSR_AMD64_IBSOPCTL, 0); 274 267 } 275 268 269 + static inline int eilvt_is_available(int offset) 270 + { 271 + /* check if we may assign a vector */ 272 + return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); 273 + } 274 + 275 + static inline int ibs_eilvt_valid(void) 276 + { 277 + u64 val; 278 + int offset; 279 + 280 + rdmsrl(MSR_AMD64_IBSCTL, val); 281 + if (!(val & IBSCTL_LVT_OFFSET_VALID)) { 282 + pr_err(FW_BUG "cpu %d, invalid IBS " 283 + "interrupt offset %d (MSR%08X=0x%016llx)", 284 + smp_processor_id(), offset, 285 + MSR_AMD64_IBSCTL, val); 286 + return 0; 287 + } 288 + 289 + offset = val & IBSCTL_LVT_OFFSET_MASK; 290 + 291 + if (eilvt_is_available(offset)) 292 + return !0; 293 + 294 + pr_err(FW_BUG "cpu %d, IBS interrupt offset %d " 295 + "not available (MSR%08X=0x%016llx)", 296 + smp_processor_id(), offset, 297 + MSR_AMD64_IBSCTL, val); 298 + 299 + return 0; 300 + } 301 + 302 + static inline int get_ibs_offset(void) 303 + { 304 + u64 val; 305 + 306 + rdmsrl(MSR_AMD64_IBSCTL, val); 307 + if (!(val & IBSCTL_LVT_OFFSET_VALID)) 308 + return -EINVAL; 309 + 310 + return val & IBSCTL_LVT_OFFSET_MASK; 311 + } 312 + 313 + static void setup_APIC_ibs(void) 314 + { 315 + int offset; 316 + 317 + offset = get_ibs_offset(); 318 + if (offset < 0) 319 + goto failed; 320 + 321 + if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0)) 322 + return; 323 + failed: 324 + pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n", 325 + smp_processor_id()); 326 + } 327 + 328 + static void clear_APIC_ibs(void) 329 + { 330 + int offset; 331 + 332 + offset = get_ibs_offset(); 333 + if (offset >= 0) 334 + setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); 335 + } 336 + 276 337 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX 277 338 278 339 static void op_mux_switch_ctrl(struct op_x86_model_spec const *model, ··· 451 376 } 452 377 453 378 if (ibs_caps) 454 - setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0); 379 + setup_APIC_ibs(); 455 380 } 456 381 457 382 static void op_amd_cpu_shutdown(void) 458 383 { 459 384 if (ibs_caps) 460 - setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); 385 + clear_APIC_ibs(); 461 386 } 462 387 463 388 static int op_amd_check_ctrs(struct pt_regs * const regs, ··· 520 445 op_amd_stop_ibs(); 521 446 } 522 447 523 - static int __init_ibs_nmi(void) 448 + static int setup_ibs_ctl(int ibs_eilvt_off) 524 449 { 525 - #define IBSCTL_LVTOFFSETVAL (1 << 8) 526 - #define IBSCTL 0x1cc 527 450 struct pci_dev *cpu_cfg; 528 451 int nodes; 529 452 u32 value = 0; 530 - u8 ibs_eilvt_off; 531 - 532 - ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1); 533 453 534 454 nodes = 0; 535 455 cpu_cfg = NULL; ··· 536 466 break; 537 467 ++nodes; 538 468 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off 539 - | IBSCTL_LVTOFFSETVAL); 469 + | IBSCTL_LVT_OFFSET_VALID); 540 
470 pci_read_config_dword(cpu_cfg, IBSCTL, &value); 541 - if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) { 471 + if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) { 542 472 pci_dev_put(cpu_cfg); 543 473 printk(KERN_DEBUG "Failed to setup IBS LVT offset, " 544 - "IBSCTL = 0x%08x", value); 545 - return 1; 474 + "IBSCTL = 0x%08x\n", value); 475 + return -EINVAL; 546 476 } 547 477 } while (1); 548 478 549 479 if (!nodes) { 550 - printk(KERN_DEBUG "No CPU node configured for IBS"); 551 - return 1; 480 + printk(KERN_DEBUG "No CPU node configured for IBS\n"); 481 + return -ENODEV; 552 482 } 483 + 484 + return 0; 485 + } 486 + 487 + static int force_ibs_eilvt_setup(void) 488 + { 489 + int i; 490 + int ret; 491 + 492 + /* find the next free available EILVT entry */ 493 + for (i = 1; i < 4; i++) { 494 + if (!eilvt_is_available(i)) 495 + continue; 496 + ret = setup_ibs_ctl(i); 497 + if (ret) 498 + return ret; 499 + return 0; 500 + } 501 + 502 + printk(KERN_DEBUG "No EILVT entry available\n"); 503 + 504 + return -EBUSY; 505 + } 506 + 507 + static int __init_ibs_nmi(void) 508 + { 509 + int ret; 510 + 511 + if (ibs_eilvt_valid()) 512 + return 0; 513 + 514 + ret = force_ibs_eilvt_setup(); 515 + if (ret) 516 + return ret; 517 + 518 + if (!ibs_eilvt_valid()) 519 + return -EFAULT; 520 + 521 + pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); 553 522 554 523 return 0; 555 524 }
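The oprofile changes stop picking an IBS LVT offset themselves and instead decode the one the BIOS left in the IBSCTL MSR, falling back to probing free EILVT entries only when that value is invalid. The decode, restated as a sketch with the constants from the hunk (valid bit is bit 8, offset lives in the low nibble):

        #include <linux/errno.h>
        #include <asm/msr.h>

        static int demo_ibs_lvt_offset(void)
        {
                u64 val;

                rdmsrl(MSR_AMD64_IBSCTL, val);
                if (!(val & IBSCTL_LVT_OFFSET_VALID))
                        return -EINVAL;                 /* BIOS never programmed it */

                return val & IBSCTL_LVT_OFFSET_MASK;    /* 0..15 */
        }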
+1 -1
arch/xtensa/kernel/irq.c
··· 92 92 for_each_online_cpu(j) 93 93 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 94 94 #endif 95 - seq_printf(p, " %14s", irq_desc[i].chip->typename); 95 + seq_printf(p, " %14s", irq_desc[i].chip->name); 96 96 seq_printf(p, " %s", action->name); 97 97 98 98 for (action=action->next; action; action = action->next)
+3 -3
drivers/isdn/act2000/act2000.h
··· 141 141 __u8 rcvhdr[8]; 142 142 } irq_data_isa; 143 143 144 - typedef union irq_data { 144 + typedef union act2000_irq_data { 145 145 irq_data_isa isa; 146 - } irq_data; 146 + } act2000_irq_data; 147 147 148 148 /* 149 149 * Per card driver data ··· 176 176 char *status_buf_read; 177 177 char *status_buf_write; 178 178 char *status_buf_end; 179 - irq_data idat; /* Data used for IRQ handler */ 179 + act2000_irq_data idat; /* Data used for IRQ handler */ 180 180 isdn_if interface; /* Interface to upper layer */ 181 181 char regname[35]; /* Name used for request_region */ 182 182 } act2000_card;
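The act2000 rename looks cosmetic but is forced by C's tag namespace: struct, union and enum tags share one namespace, so the driver's old union irq_data collides with genirq's new struct irq_data once both declarations are visible. With a distinct tag the two coexist cleanly; a minimal sketch (the dummy member stands in for irq_data_isa):

        struct irq_data;                        /* genirq's tag, from <linux/irq.h> */

        typedef union act2000_irq_data {        /* distinct tag: no clash */
                int dummy;
        } act2000_irq_data;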
+14 -4
drivers/isdn/hisax/config.c
··· 801 801 ll_unload(csta); 802 802 } 803 803 804 + static irqreturn_t card_irq(int intno, void *dev_id) 805 + { 806 + struct IsdnCardState *cs = dev_id; 807 + irqreturn_t ret = cs->irq_func(intno, cs); 808 + 809 + if (ret == IRQ_HANDLED) 810 + cs->irq_cnt++; 811 + return ret; 812 + } 813 + 804 814 static int init_card(struct IsdnCardState *cs) 805 815 { 806 816 int irq_cnt, cnt = 3, ret; ··· 819 809 ret = cs->cardmsg(cs, CARD_INIT, NULL); 820 810 return(ret); 821 811 } 822 - irq_cnt = kstat_irqs(cs->irq); 812 + irq_cnt = cs->irq_cnt = 0; 823 813 printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], 824 814 cs->irq, irq_cnt); 825 - if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { 815 + if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) { 826 816 printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", 827 817 cs->irq); 828 818 return 1; ··· 832 822 /* Timeout 10ms */ 833 823 msleep(10); 834 824 printk(KERN_INFO "%s: IRQ %d count %d\n", 835 - CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); 836 - if (kstat_irqs(cs->irq) == irq_cnt) { 825 + CardType[cs->typ], cs->irq, cs->irq_cnt); 826 + if (cs->irq_cnt == irq_cnt) { 837 827 printk(KERN_WARNING 838 828 "%s: IRQ(%d) getting no interrupts during init %d\n", 839 829 CardType[cs->typ], cs->irq, 4 - cnt);
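Rather than peeking at kstat_irqs(), HiSax now keeps its own irq_cnt and bumps it from the card_irq() wrapper whenever the real handler returns IRQ_HANDLED. The init-time "did we actually get interrupts" probe then becomes a private-counter comparison; a sketch of that idiom, with demo_irq_works() invented for illustration:

        #include <linux/delay.h>

        static int demo_irq_works(struct IsdnCardState *cs)
        {
                unsigned int before = cs->irq_cnt;

                /* ...ask the card to raise a test interrupt here... */
                msleep(10);                     /* give it time to fire */

                return cs->irq_cnt != before;   /* nonzero: irq line works */
        }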
+1
drivers/isdn/hisax/hisax.h
··· 959 959 u_long event; 960 960 struct work_struct tqueue; 961 961 struct timer_list dbusytimer; 962 + unsigned int irq_cnt; 962 963 #ifdef ERROR_STATISTIC 963 964 int err_crc; 964 965 int err_tx;
+2 -2
drivers/mfd/twl4030-irq.c
··· 78 78 u8 irq_lines; /* number of supported irq lines */ 79 79 80 80 /* SIR ignored -- set interrupt, for testing only */ 81 - struct irq_data { 81 + struct sih_irq_data { 82 82 u8 isr_offset; 83 83 u8 imr_offset; 84 84 } mask[2]; ··· 810 810 twl4030_irq_chip = dummy_irq_chip; 811 811 twl4030_irq_chip.name = "twl4030"; 812 812 813 - twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; 813 + twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; 814 814 815 815 for (i = irq_base; i < irq_end; i++) { 816 816 set_irq_chip_and_handler(i, &twl4030_irq_chip,
+4 -4
drivers/pci/dmar.c
··· 1221 1221 } 1222 1222 } 1223 1223 1224 - void dmar_msi_unmask(unsigned int irq) 1224 + void dmar_msi_unmask(struct irq_data *data) 1225 1225 { 1226 - struct intel_iommu *iommu = get_irq_data(irq); 1226 + struct intel_iommu *iommu = irq_data_get_irq_data(data); 1227 1227 unsigned long flag; 1228 1228 1229 1229 /* unmask it */ ··· 1234 1234 spin_unlock_irqrestore(&iommu->register_lock, flag); 1235 1235 } 1236 1236 1237 - void dmar_msi_mask(unsigned int irq) 1237 + void dmar_msi_mask(struct irq_data *data) 1238 1238 { 1239 1239 unsigned long flag; 1240 - struct intel_iommu *iommu = get_irq_data(irq); 1240 + struct intel_iommu *iommu = irq_data_get_irq_data(data); 1241 1241 1242 1242 /* mask it */ 1243 1243 spin_lock_irqsave(&iommu->register_lock, flag);
+8 -14
drivers/pci/htirq.c
··· 57 57 *msg = cfg->msg; 58 58 } 59 59 60 - void mask_ht_irq(unsigned int irq) 60 + void mask_ht_irq(struct irq_data *data) 61 61 { 62 - struct ht_irq_cfg *cfg; 63 - struct ht_irq_msg msg; 62 + struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); 63 + struct ht_irq_msg msg = cfg->msg; 64 64 65 - cfg = get_irq_data(irq); 66 - 67 - msg = cfg->msg; 68 65 msg.address_lo |= 1; 69 - write_ht_irq_msg(irq, &msg); 66 + write_ht_irq_msg(data->irq, &msg); 70 67 } 71 68 72 - void unmask_ht_irq(unsigned int irq) 69 + void unmask_ht_irq(struct irq_data *data) 73 70 { 74 - struct ht_irq_cfg *cfg; 75 - struct ht_irq_msg msg; 71 + struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); 72 + struct ht_irq_msg msg = cfg->msg; 76 73 77 - cfg = get_irq_data(irq); 78 - 79 - msg = cfg->msg; 80 74 msg.address_lo &= ~1; 81 - write_ht_irq_msg(irq, &msg); 75 + write_ht_irq_msg(data->irq, &msg); 82 76 } 83 77 84 78 /**
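The htirq mask/unmask conversion doubles as a tidy-up: the cached ht_irq_msg is struct-copied out of the cfg stored in irq_data, the HT MSI mask bit (bit 0 of address_lo) is flipped in the copy, and the copy is written back. Folded into one sketch, as if it lived inside htirq.c next to the hunk above:

        static void demo_ht_set_masked(struct irq_data *data, bool masked)
        {
                struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
                struct ht_irq_msg msg = cfg->msg;       /* struct copy of cached msg */

                if (masked)
                        msg.address_lo |= 1;            /* bit 0: request masked */
                else
                        msg.address_lo &= ~1;

                write_ht_irq_msg(data->irq, &msg);
        }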
+28 -186
drivers/pci/intr_remapping.c
··· 46 46 } 47 47 early_param("intremap", setup_intremap); 48 48 49 - struct irq_2_iommu { 50 - struct intel_iommu *iommu; 51 - u16 irte_index; 52 - u16 sub_handle; 53 - u8 irte_mask; 54 - }; 55 - 56 - #ifdef CONFIG_GENERIC_HARDIRQS 57 - static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) 58 - { 59 - struct irq_2_iommu *iommu; 60 - 61 - iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); 62 - printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); 63 - 64 - return iommu; 65 - } 66 - 67 - static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 68 - { 69 - struct irq_desc *desc; 70 - 71 - desc = irq_to_desc(irq); 72 - 73 - if (WARN_ON_ONCE(!desc)) 74 - return NULL; 75 - 76 - return desc->irq_2_iommu; 77 - } 78 - 79 - static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) 80 - { 81 - struct irq_desc *desc; 82 - struct irq_2_iommu *irq_iommu; 83 - 84 - desc = irq_to_desc(irq); 85 - if (!desc) { 86 - printk(KERN_INFO "can not get irq_desc for %d\n", irq); 87 - return NULL; 88 - } 89 - 90 - irq_iommu = desc->irq_2_iommu; 91 - 92 - if (!irq_iommu) 93 - desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); 94 - 95 - return desc->irq_2_iommu; 96 - } 97 - 98 - #else /* !CONFIG_SPARSE_IRQ */ 99 - 100 - static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; 101 - 102 - static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 103 - { 104 - if (irq < nr_irqs) 105 - return &irq_2_iommuX[irq]; 106 - 107 - return NULL; 108 - } 109 - static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) 110 - { 111 - return irq_2_iommu(irq); 112 - } 113 - #endif 114 - 115 49 static DEFINE_SPINLOCK(irq_2_ir_lock); 116 50 117 - static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) 51 + static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 118 52 { 119 - struct irq_2_iommu *irq_iommu; 120 - 121 - irq_iommu = irq_2_iommu(irq); 122 - 123 - if (!irq_iommu) 124 - return NULL; 125 - 126 - if (!irq_iommu->iommu) 127 - return NULL; 128 - 129 - return irq_iommu; 130 - } 131 - 132 - int irq_remapped(int irq) 133 - { 134 - return valid_irq_2_iommu(irq) != NULL; 53 + struct irq_cfg *cfg = get_irq_chip_data(irq); 54 + return cfg ? &cfg->irq_2_iommu : NULL; 135 55 } 136 56 137 57 int get_irte(int irq, struct irte *entry) 138 58 { 139 - int index; 140 - struct irq_2_iommu *irq_iommu; 59 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 141 60 unsigned long flags; 61 + int index; 142 62 143 - if (!entry) 63 + if (!entry || !irq_iommu) 144 64 return -1; 145 65 146 66 spin_lock_irqsave(&irq_2_ir_lock, flags); 147 - irq_iommu = valid_irq_2_iommu(irq); 148 - if (!irq_iommu) { 149 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 150 - return -1; 151 - } 152 67 153 68 index = irq_iommu->irte_index + irq_iommu->sub_handle; 154 69 *entry = *(irq_iommu->iommu->ir_table->base + index); ··· 75 160 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) 76 161 { 77 162 struct ir_table *table = iommu->ir_table; 78 - struct irq_2_iommu *irq_iommu; 163 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 79 164 u16 index, start_index; 80 165 unsigned int mask = 0; 81 166 unsigned long flags; 82 167 int i; 83 168 84 - if (!count) 169 + if (!count || !irq_iommu) 85 170 return -1; 86 - 87 - #ifndef CONFIG_SPARSE_IRQ 88 - /* protect irq_2_iommu_alloc later */ 89 - if (irq >= nr_irqs) 90 - return -1; 91 - #endif 92 171 93 172 /* 94 173 * start the IRTE search from index 0. 
··· 123 214 for (i = index; i < index + count; i++) 124 215 table->base[i].present = 1; 125 216 126 - irq_iommu = irq_2_iommu_alloc(irq); 127 - if (!irq_iommu) { 128 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 129 - printk(KERN_ERR "can't allocate irq_2_iommu\n"); 130 - return -1; 131 - } 132 - 133 217 irq_iommu->iommu = iommu; 134 218 irq_iommu->irte_index = index; 135 219 irq_iommu->sub_handle = 0; ··· 146 244 147 245 int map_irq_to_irte_handle(int irq, u16 *sub_handle) 148 246 { 149 - int index; 150 - struct irq_2_iommu *irq_iommu; 247 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 151 248 unsigned long flags; 249 + int index; 250 + 251 + if (!irq_iommu) 252 + return -1; 152 253 153 254 spin_lock_irqsave(&irq_2_ir_lock, flags); 154 - irq_iommu = valid_irq_2_iommu(irq); 155 - if (!irq_iommu) { 156 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 157 - return -1; 158 - } 159 - 160 255 *sub_handle = irq_iommu->sub_handle; 161 256 index = irq_iommu->irte_index; 162 257 spin_unlock_irqrestore(&irq_2_ir_lock, flags); ··· 162 263 163 264 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) 164 265 { 165 - struct irq_2_iommu *irq_iommu; 266 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 166 267 unsigned long flags; 167 268 168 - spin_lock_irqsave(&irq_2_ir_lock, flags); 169 - 170 - irq_iommu = irq_2_iommu_alloc(irq); 171 - 172 - if (!irq_iommu) { 173 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 174 - printk(KERN_ERR "can't allocate irq_2_iommu\n"); 269 + if (!irq_iommu) 175 270 return -1; 176 - } 271 + 272 + spin_lock_irqsave(&irq_2_ir_lock, flags); 177 273 178 274 irq_iommu->iommu = iommu; 179 275 irq_iommu->irte_index = index; ··· 180 286 return 0; 181 287 } 182 288 183 - int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) 184 - { 185 - struct irq_2_iommu *irq_iommu; 186 - unsigned long flags; 187 - 188 - spin_lock_irqsave(&irq_2_ir_lock, flags); 189 - irq_iommu = valid_irq_2_iommu(irq); 190 - if (!irq_iommu) { 191 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 192 - return -1; 193 - } 194 - 195 - irq_iommu->iommu = NULL; 196 - irq_iommu->irte_index = 0; 197 - irq_iommu->sub_handle = 0; 198 - irq_2_iommu(irq)->irte_mask = 0; 199 - 200 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 201 - 202 - return 0; 203 - } 204 - 205 289 int modify_irte(int irq, struct irte *irte_modified) 206 290 { 207 - int rc; 208 - int index; 209 - struct irte *irte; 291 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 210 292 struct intel_iommu *iommu; 211 - struct irq_2_iommu *irq_iommu; 212 293 unsigned long flags; 294 + struct irte *irte; 295 + int rc, index; 296 + 297 + if (!irq_iommu) 298 + return -1; 213 299 214 300 spin_lock_irqsave(&irq_2_ir_lock, flags); 215 - irq_iommu = valid_irq_2_iommu(irq); 216 - if (!irq_iommu) { 217 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 218 - return -1; 219 - } 220 301 221 302 iommu = irq_iommu->iommu; 222 303 ··· 203 334 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 204 335 205 336 rc = qi_flush_iec(iommu, index, 0); 206 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 207 - 208 - return rc; 209 - } 210 - 211 - int flush_irte(int irq) 212 - { 213 - int rc; 214 - int index; 215 - struct intel_iommu *iommu; 216 - struct irq_2_iommu *irq_iommu; 217 - unsigned long flags; 218 - 219 - spin_lock_irqsave(&irq_2_ir_lock, flags); 220 - irq_iommu = valid_irq_2_iommu(irq); 221 - if (!irq_iommu) { 222 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 223 - return -1; 224 - } 225 - 226 - iommu = irq_iommu->iommu; 227 - 
228 - index = irq_iommu->irte_index + irq_iommu->sub_handle; 229 - 230 - rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); 231 337 spin_unlock_irqrestore(&irq_2_ir_lock, flags); 232 338 233 339 return rc; ··· 264 420 265 421 int free_irte(int irq) 266 422 { 267 - int rc = 0; 268 - struct irq_2_iommu *irq_iommu; 423 + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 269 424 unsigned long flags; 425 + int rc; 426 + 427 + if (!irq_iommu) 428 + return -1; 270 429 271 430 spin_lock_irqsave(&irq_2_ir_lock, flags); 272 - irq_iommu = valid_irq_2_iommu(irq); 273 - if (!irq_iommu) { 274 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 275 - return -1; 276 - } 277 431 278 432 rc = clear_entries(irq_iommu); 279 433
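With the hunks above, intr_remapping.c stops allocating a separate irq_2_iommu per interrupt; the structure is assumed to be embedded in the x86 irq_cfg that get_irq_chip_data() returns, which is why every lookup collapses to a NULL-checked pointer chase. Roughly, under that assumption:

    /* Assumed layout; the real struct irq_cfg lives in arch/x86 headers. */
    struct irq_cfg {
            /* ... existing vector and domain fields ... */
    #ifdef CONFIG_INTR_REMAP
            struct irq_2_iommu irq_2_iommu;
    #endif
    };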
+16 -22
drivers/pci/msi.c
··· 170 170 desc->masked = __msix_mask_irq(desc, flag); 171 171 } 172 172 173 - static void msi_set_mask_bit(unsigned irq, u32 flag) 173 + static void msi_set_mask_bit(struct irq_data *data, u32 flag) 174 174 { 175 - struct msi_desc *desc = get_irq_msi(irq); 175 + struct msi_desc *desc = irq_data_get_msi(data); 176 176 177 177 if (desc->msi_attrib.is_msix) { 178 178 msix_mask_irq(desc, flag); 179 179 readl(desc->mask_base); /* Flush write to device */ 180 180 } else { 181 - unsigned offset = irq - desc->dev->irq; 181 + unsigned offset = data->irq - desc->dev->irq; 182 182 msi_mask_irq(desc, 1 << offset, flag << offset); 183 183 } 184 184 } 185 185 186 - void mask_msi_irq(unsigned int irq) 186 + void mask_msi_irq(struct irq_data *data) 187 187 { 188 - msi_set_mask_bit(irq, 1); 188 + msi_set_mask_bit(data, 1); 189 189 } 190 190 191 - void unmask_msi_irq(unsigned int irq) 191 + void unmask_msi_irq(struct irq_data *data) 192 192 { 193 - msi_set_mask_bit(irq, 0); 193 + msi_set_mask_bit(data, 0); 194 194 } 195 195 196 - void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 196 + void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 197 197 { 198 - struct msi_desc *entry = get_irq_desc_msi(desc); 199 - 200 198 BUG_ON(entry->dev->current_state != PCI_D0); 201 199 202 200 if (entry->msi_attrib.is_msix) { ··· 225 227 226 228 void read_msi_msg(unsigned int irq, struct msi_msg *msg) 227 229 { 228 - struct irq_desc *desc = irq_to_desc(irq); 230 + struct msi_desc *entry = get_irq_msi(irq); 229 231 230 - read_msi_msg_desc(desc, msg); 232 + __read_msi_msg(entry, msg); 231 233 } 232 234 233 - void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 235 + void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 234 236 { 235 - struct msi_desc *entry = get_irq_desc_msi(desc); 236 - 237 237 /* Assert that the cache is valid, assuming that 238 238 * valid messages are not all-zeroes. */ 239 239 BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | ··· 242 246 243 247 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) 244 248 { 245 - struct irq_desc *desc = irq_to_desc(irq); 249 + struct msi_desc *entry = get_irq_msi(irq); 246 250 247 - get_cached_msi_msg_desc(desc, msg); 251 + __get_cached_msi_msg(entry, msg); 248 252 } 249 253 250 - void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) 254 + void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 251 255 { 252 - struct msi_desc *entry = get_irq_desc_msi(desc); 253 - 254 256 if (entry->dev->current_state != PCI_D0) { 255 257 /* Don't touch the hardware now */ 256 258 } else if (entry->msi_attrib.is_msix) { ··· 286 292 287 293 void write_msi_msg(unsigned int irq, struct msi_msg *msg) 288 294 { 289 - struct irq_desc *desc = irq_to_desc(irq); 295 + struct msi_desc *entry = get_irq_msi(irq); 290 296 291 - write_msi_msg_desc(desc, msg); 297 + __write_msi_msg(entry, msg); 292 298 } 293 299 294 300 static void free_msi_irqs(struct pci_dev *dev)
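The msi.c hunks split each helper into an irq-number wrapper and a double-underscore worker that takes the msi_desc directly, so code that already holds an irq_data or msi_desc can skip the descriptor lookup. A hedged sketch of such a caller, e.g. from inside a chip method (foo_msi_callback is illustrative only):

    #include <linux/irq.h>
    #include <linux/msi.h>

    static void foo_msi_callback(struct irq_data *data)
    {
            struct msi_desc *desc = irq_data_get_msi(data);
            struct msi_msg msg;

            __get_cached_msi_msg(desc, &msg);
            /* ... modify address/data fields as needed, then write back: */
            __write_msi_msg(desc, &msg);
    }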
+11 -12
drivers/xen/events.c
··· 338 338 339 339 static int find_unbound_irq(void) 340 340 { 341 - int irq; 342 - struct irq_desc *desc; 341 + struct irq_data *data; 342 + int irq, res; 343 343 344 344 for (irq = 0; irq < nr_irqs; irq++) { 345 - desc = irq_to_desc(irq); 345 + data = irq_get_irq_data(irq); 346 346 /* only 0->15 have init'd desc; handle irq > 16 */ 347 - if (desc == NULL) 347 + if (!data) 348 348 break; 349 - if (desc->chip == &no_irq_chip) 349 + if (data->chip == &no_irq_chip) 350 350 break; 351 - if (desc->chip != &xen_dynamic_chip) 351 + if (data->chip != &xen_dynamic_chip) 352 352 continue; 353 353 if (irq_info[irq].type == IRQT_UNBOUND) 354 - break; 354 + return irq; 355 355 } 356 356 357 357 if (irq == nr_irqs) 358 358 panic("No available IRQ to bind to: increase nr_irqs!\n"); 359 359 360 - desc = irq_to_desc_alloc_node(irq, 0); 361 - if (WARN_ON(desc == NULL)) 362 - return -1; 360 + res = irq_alloc_desc_at(irq, 0); 363 361 364 - dynamic_irq_init_keep_chip_data(irq); 362 + if (WARN_ON(res != irq)) 363 + return -1; 365 364 366 365 return irq; 367 366 } ··· 494 495 if (irq_info[irq].type != IRQT_UNBOUND) { 495 496 irq_info[irq] = mk_unbound_info(); 496 497 497 - dynamic_irq_cleanup(irq); 498 + irq_free_desc(irq); 498 499 } 499 500 500 501 spin_unlock(&irq_mapping_update_lock);
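find_unbound_irq() now claims a descriptor through irq_alloc_desc_at(), which returns the requested number on success, and releases it with irq_free_desc() instead of dynamic_irq_cleanup(). A small sketch of that allocation idiom (foo_claim_irq is hypothetical):

    #include <linux/irq.h>
    #include <linux/errno.h>

    static int foo_claim_irq(unsigned int want)
    {
            int res = irq_alloc_desc_at(want, 0);   /* allocate on node 0 */

            if (res != want)        /* slot taken or allocation failed */
                    return -ENODEV;
            return want;
    }

    /* The matching teardown is irq_free_desc(want). */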
+4 -6
include/linux/dmar.h
··· 106 106 __u64 high; 107 107 }; 108 108 }; 109 + 109 110 #ifdef CONFIG_INTR_REMAP 110 111 extern int intr_remapping_enabled; 111 112 extern int intr_remapping_supported(void); ··· 120 119 extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, 121 120 u16 sub_handle); 122 121 extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); 123 - extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); 124 - extern int flush_irte(int irq); 125 122 extern int free_irte(int irq); 126 123 127 - extern int irq_remapped(int irq); 128 124 extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); 129 125 extern struct intel_iommu *map_ioapic_to_ir(int apic); 130 126 extern struct intel_iommu *map_hpet_to_ir(u8 id); ··· 175 177 return 0; 176 178 } 177 179 178 - #define irq_remapped(irq) (0) 179 180 #define enable_intr_remapping(mode) (-1) 180 181 #define disable_intr_remapping() (0) 181 182 #define reenable_intr_remapping(mode) (0) ··· 184 187 /* Can't use the common MSI interrupt functions 185 188 * since DMAR is not a pci device 186 189 */ 187 - extern void dmar_msi_unmask(unsigned int irq); 188 - extern void dmar_msi_mask(unsigned int irq); 190 + struct irq_data; 191 + extern void dmar_msi_unmask(struct irq_data *data); 192 + extern void dmar_msi_mask(struct irq_data *data); 189 193 extern void dmar_msi_read(int irq, struct msi_msg *msg); 190 194 extern void dmar_msi_write(int irq, struct msi_msg *msg); 191 195 extern int dmar_set_interrupt(struct intel_iommu *iommu);
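Since dmar.h now only passes struct irq_data through pointers, a forward declaration is enough and the header avoids pulling in <linux/irq.h>. The same idiom reduced to its core (consumer() is a placeholder name):

    /* A forward declaration suffices when only pointers cross the API: */
    struct irq_data;

    extern void consumer(struct irq_data *data);    /* placeholder prototype */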
+3 -2
include/linux/htirq.h
··· 9 9 /* Helper functions.. */ 10 10 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 11 11 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); 12 - void mask_ht_irq(unsigned int irq); 13 - void unmask_ht_irq(unsigned int irq); 12 + struct irq_data; 13 + void mask_ht_irq(struct irq_data *data); 14 + void unmask_ht_irq(struct irq_data *data); 14 15 15 16 /* The arch hook for getting things started */ 16 17 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
-3
include/linux/interrupt.h
··· 647 647 struct seq_file; 648 648 int show_interrupts(struct seq_file *p, void *v); 649 649 650 - struct irq_desc; 651 - 652 650 extern int early_irq_init(void); 653 651 extern int arch_probe_nr_irqs(void); 654 652 extern int arch_early_irq_init(void); 655 - extern int arch_init_chip_data(struct irq_desc *desc, int node); 656 653 657 654 #endif
+196 -301
include/linux/irq.h
··· 72 72 #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ 73 73 #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ 74 74 75 + #define IRQF_MODIFY_MASK \ 76 + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ 77 + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL) 78 + 75 79 #ifdef CONFIG_IRQ_PER_CPU 76 80 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 77 81 # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) ··· 84 80 # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING 85 81 #endif 86 82 87 - struct proc_dir_entry; 88 83 struct msi_desc; 84 + 85 + /** 86 + * struct irq_data - per irq and irq chip data passed down to chip functions 87 + * @irq: interrupt number 88 + * @node: node index useful for balancing 89 + * @chip: low level interrupt hardware access 90 + * @handler_data: per-IRQ data for the irq_chip methods 91 + * @chip_data: platform-specific per-chip private data for the chip 92 + * methods, to allow shared chip implementations 93 + * @msi_desc: MSI descriptor 94 + * @affinity: IRQ affinity on SMP 95 + * 96 + * The fields here need to overlay the ones in irq_desc until we 97 + * cleaned up the direct references and switched everything over to 98 + * irq_data. 99 + */ 100 + struct irq_data { 101 + unsigned int irq; 102 + unsigned int node; 103 + struct irq_chip *chip; 104 + void *handler_data; 105 + void *chip_data; 106 + struct msi_desc *msi_desc; 107 + #ifdef CONFIG_SMP 108 + cpumask_var_t affinity; 109 + #endif 110 + }; 89 111 90 112 /** 91 113 * struct irq_chip - hardware interrupt chip descriptor 92 114 * 93 115 * @name: name for /proc/interrupts 94 - * @startup: start up the interrupt (defaults to ->enable if NULL) 95 - * @shutdown: shut down the interrupt (defaults to ->disable if NULL) 96 - * @enable: enable the interrupt (defaults to chip->unmask if NULL) 97 - * @disable: disable the interrupt 98 - * @ack: start of a new interrupt 99 - * @mask: mask an interrupt source 100 - * @mask_ack: ack and mask an interrupt source 101 - * @unmask: unmask an interrupt source 102 - * @eoi: end of interrupt - chip level 103 - * @end: end of interrupt - flow level 104 - * @set_affinity: set the CPU affinity on SMP machines 105 - * @retrigger: resend an IRQ to the CPU 106 - * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) 
of an IRQ 107 - * @set_wake: enable/disable power-management wake-on of an IRQ 116 + * @startup: deprecated, replaced by irq_startup 117 + * @shutdown: deprecated, replaced by irq_shutdown 118 + * @enable: deprecated, replaced by irq_enable 119 + * @disable: deprecated, replaced by irq_disable 120 + * @ack: deprecated, replaced by irq_ack 121 + * @mask: deprecated, replaced by irq_mask 122 + * @mask_ack: deprecated, replaced by irq_mask_ack 123 + * @unmask: deprecated, replaced by irq_unmask 124 + * @eoi: deprecated, replaced by irq_eoi 125 + * @end: deprecated, will go away with __do_IRQ() 126 + * @set_affinity: deprecated, replaced by irq_set_affinity 127 + * @retrigger: deprecated, replaced by irq_retrigger 128 + * @set_type: deprecated, replaced by irq_set_type 129 + * @set_wake: deprecated, replaced by irq_wake 130 + * @bus_lock: deprecated, replaced by irq_bus_lock 131 + * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock 108 132 * 109 - * @bus_lock: function to lock access to slow bus (i2c) chips 110 - * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips 133 + * @irq_startup: start up the interrupt (defaults to ->enable if NULL) 134 + * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) 135 + * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) 136 + * @irq_disable: disable the interrupt 137 + * @irq_ack: start of a new interrupt 138 + * @irq_mask: mask an interrupt source 139 + * @irq_mask_ack: ack and mask an interrupt source 140 + * @irq_unmask: unmask an interrupt source 141 + * @irq_eoi: end of interrupt 142 + * @irq_set_affinity: set the CPU affinity on SMP machines 143 + * @irq_retrigger: resend an IRQ to the CPU 144 + * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) 
of an IRQ 145 + * @irq_set_wake: enable/disable power-management wake-on of an IRQ 146 + * @irq_bus_lock: function to lock access to slow bus (i2c) chips 147 + * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips 111 148 * 112 149 * @release: release function solely used by UML 113 - * @typename: obsoleted by name, kept as migration helper 114 150 */ 115 151 struct irq_chip { 116 152 const char *name; 153 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 117 154 unsigned int (*startup)(unsigned int irq); 118 155 void (*shutdown)(unsigned int irq); 119 156 void (*enable)(unsigned int irq); ··· 175 130 176 131 void (*bus_lock)(unsigned int irq); 177 132 void (*bus_sync_unlock)(unsigned int irq); 133 + #endif 134 + unsigned int (*irq_startup)(struct irq_data *data); 135 + void (*irq_shutdown)(struct irq_data *data); 136 + void (*irq_enable)(struct irq_data *data); 137 + void (*irq_disable)(struct irq_data *data); 138 + 139 + void (*irq_ack)(struct irq_data *data); 140 + void (*irq_mask)(struct irq_data *data); 141 + void (*irq_mask_ack)(struct irq_data *data); 142 + void (*irq_unmask)(struct irq_data *data); 143 + void (*irq_eoi)(struct irq_data *data); 144 + 145 + int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); 146 + int (*irq_retrigger)(struct irq_data *data); 147 + int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); 148 + int (*irq_set_wake)(struct irq_data *data, unsigned int on); 149 + 150 + void (*irq_bus_lock)(struct irq_data *data); 151 + void (*irq_bus_sync_unlock)(struct irq_data *data); 178 152 179 153 /* Currently used only by UML, might disappear one day.*/ 180 154 #ifdef CONFIG_IRQ_RELEASE_METHOD 181 155 void (*release)(unsigned int irq, void *dev_id); 182 156 #endif 183 - /* 184 - * For compatibility, ->typename is copied into ->name. 185 - * Will disappear. 
186 - */ 187 - const char *typename; 188 157 }; 189 158 190 - struct timer_rand_state; 191 - struct irq_2_iommu; 192 - /** 193 - * struct irq_desc - interrupt descriptor 194 - * @irq: interrupt number for this descriptor 195 - * @timer_rand_state: pointer to timer rand state struct 196 - * @kstat_irqs: irq stats per cpu 197 - * @irq_2_iommu: iommu with this irq 198 - * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] 199 - * @chip: low level interrupt hardware access 200 - * @msi_desc: MSI descriptor 201 - * @handler_data: per-IRQ data for the irq_chip methods 202 - * @chip_data: platform-specific per-chip private data for the chip 203 - * methods, to allow shared chip implementations 204 - * @action: the irq action chain 205 - * @status: status information 206 - * @depth: disable-depth, for nested irq_disable() calls 207 - * @wake_depth: enable depth, for multiple set_irq_wake() callers 208 - * @irq_count: stats field to detect stalled irqs 209 - * @last_unhandled: aging timer for unhandled count 210 - * @irqs_unhandled: stats field for spurious unhandled interrupts 211 - * @lock: locking for SMP 212 - * @affinity: IRQ affinity on SMP 213 - * @node: node index useful for balancing 214 - * @pending_mask: pending rebalanced interrupts 215 - * @threads_active: number of irqaction threads currently running 216 - * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers 217 - * @dir: /proc/irq/ procfs entry 218 - * @name: flow handler name for /proc/interrupts output 219 - */ 220 - struct irq_desc { 221 - unsigned int irq; 222 - struct timer_rand_state *timer_rand_state; 223 - unsigned int *kstat_irqs; 224 - #ifdef CONFIG_INTR_REMAP 225 - struct irq_2_iommu *irq_2_iommu; 226 - #endif 227 - irq_flow_handler_t handle_irq; 228 - struct irq_chip *chip; 229 - struct msi_desc *msi_desc; 230 - void *handler_data; 231 - void *chip_data; 232 - struct irqaction *action; /* IRQ action list */ 233 - unsigned int status; /* IRQ status */ 234 - 235 - unsigned int depth; /* nested irq disables */ 236 - unsigned int wake_depth; /* nested wake enables */ 237 - unsigned int irq_count; /* For detecting broken IRQs */ 238 - unsigned long last_unhandled; /* Aging timer for unhandled count */ 239 - unsigned int irqs_unhandled; 240 - raw_spinlock_t lock; 241 - #ifdef CONFIG_SMP 242 - cpumask_var_t affinity; 243 - const struct cpumask *affinity_hint; 244 - unsigned int node; 245 - #ifdef CONFIG_GENERIC_PENDING_IRQ 246 - cpumask_var_t pending_mask; 247 - #endif 248 - #endif 249 - atomic_t threads_active; 250 - wait_queue_head_t wait_for_threads; 251 - #ifdef CONFIG_PROC_FS 252 - struct proc_dir_entry *dir; 253 - #endif 254 - const char *name; 255 - } ____cacheline_internodealigned_in_smp; 256 - 257 - extern void arch_init_copy_chip_data(struct irq_desc *old_desc, 258 - struct irq_desc *desc, int node); 259 - extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); 260 - 261 - #ifndef CONFIG_SPARSE_IRQ 262 - extern struct irq_desc irq_desc[NR_IRQS]; 263 - #endif 264 - 265 - #ifdef CONFIG_NUMA_IRQ_DESC 266 - extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); 267 - #else 268 - static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) 269 - { 270 - return desc; 271 - } 272 - #endif 273 - 274 - extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); 159 + /* This include will go away once we isolated irq_desc usage to core code */ 160 + #include <linux/irqdesc.h> 275 161 276 162 /* 277 163 * Pick up 
the arch-dependent methods: 278 164 */ 279 165 #include <asm/hw_irq.h> 280 166 167 + #ifndef NR_IRQS_LEGACY 168 + # define NR_IRQS_LEGACY 0 169 + #endif 170 + 171 + #ifndef ARCH_IRQ_INIT_FLAGS 172 + # define ARCH_IRQ_INIT_FLAGS 0 173 + #endif 174 + 175 + #define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) 176 + 177 + struct irqaction; 281 178 extern int setup_irq(unsigned int irq, struct irqaction *new); 282 179 extern void remove_irq(unsigned int irq, struct irqaction *act); 283 180 284 181 #ifdef CONFIG_GENERIC_HARDIRQS 285 182 286 - #ifdef CONFIG_SMP 287 - 288 - #ifdef CONFIG_GENERIC_PENDING_IRQ 289 - 183 + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) 290 184 void move_native_irq(int irq); 291 185 void move_masked_irq(int irq); 292 - 293 - #else /* CONFIG_GENERIC_PENDING_IRQ */ 294 - 295 - static inline void move_irq(int irq) 296 - { 297 - } 298 - 299 - static inline void move_native_irq(int irq) 300 - { 301 - } 302 - 303 - static inline void move_masked_irq(int irq) 304 - { 305 - } 306 - 307 - #endif /* CONFIG_GENERIC_PENDING_IRQ */ 308 - 309 - #else /* CONFIG_SMP */ 310 - 311 - #define move_native_irq(x) 312 - #define move_masked_irq(x) 313 - 314 - #endif /* CONFIG_SMP */ 186 + #else 187 + static inline void move_native_irq(int irq) { } 188 + static inline void move_masked_irq(int irq) { } 189 + #endif 315 190 316 191 extern int no_irq_affinity; 317 - 318 - static inline int irq_balancing_disabled(unsigned int irq) 319 - { 320 - struct irq_desc *desc; 321 - 322 - desc = irq_to_desc(irq); 323 - return desc->status & IRQ_NO_BALANCING_MASK; 324 - } 325 192 326 193 /* Handle irq action chains: */ 327 194 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); ··· 250 293 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 251 294 extern void handle_nested_irq(unsigned int irq); 252 295 253 - /* 254 - * Monolithic do_IRQ implementation. 255 - */ 256 - #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 257 - extern unsigned int __do_IRQ(unsigned int irq); 258 - #endif 259 - 260 - /* 261 - * Architectures call this to let the generic IRQ layer 262 - * handle an interrupt. If the descriptor is attached to an 263 - * irqchip-style controller then we call the ->handle_irq() handler, 264 - * and it calls __do_IRQ() if it's attached to an irqtype-style controller. 
265 - */ 266 - static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) 267 - { 268 - #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 269 - desc->handle_irq(irq, desc); 270 - #else 271 - if (likely(desc->handle_irq)) 272 - desc->handle_irq(irq, desc); 273 - else 274 - __do_IRQ(irq); 275 - #endif 276 - } 277 - 278 - static inline void generic_handle_irq(unsigned int irq) 279 - { 280 - generic_handle_irq_desc(irq, irq_to_desc(irq)); 281 - } 282 - 283 296 /* Handling of unhandled and spurious interrupts: */ 284 297 extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 285 298 irqreturn_t action_ret); 286 299 287 - /* Resending of interrupts :*/ 288 - void check_irq_resend(struct irq_desc *desc, unsigned int irq); 289 300 290 301 /* Enable/disable irq debugging output: */ 291 302 extern int noirqdebug_setup(char *str); ··· 275 350 extern void 276 351 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 277 352 const char *name); 278 - 279 - /* caller has locked the irq_desc and both params are valid */ 280 - static inline void __set_irq_handler_unlocked(int irq, 281 - irq_flow_handler_t handler) 282 - { 283 - struct irq_desc *desc; 284 - 285 - desc = irq_to_desc(irq); 286 - desc->handle_irq = handler; 287 - } 288 353 289 354 /* 290 355 * Set a highlevel flow handler for a given IRQ: ··· 299 384 300 385 extern void set_irq_nested_thread(unsigned int irq, int nest); 301 386 302 - extern void set_irq_noprobe(unsigned int irq); 303 - extern void set_irq_probe(unsigned int irq); 387 + void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); 388 + 389 + static inline void irq_set_status_flags(unsigned int irq, unsigned long set) 390 + { 391 + irq_modify_status(irq, 0, set); 392 + } 393 + 394 + static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) 395 + { 396 + irq_modify_status(irq, clr, 0); 397 + } 398 + 399 + static inline void set_irq_noprobe(unsigned int irq) 400 + { 401 + irq_modify_status(irq, 0, IRQ_NOPROBE); 402 + } 403 + 404 + static inline void set_irq_probe(unsigned int irq) 405 + { 406 + irq_modify_status(irq, IRQ_NOPROBE, 0); 407 + } 304 408 305 409 /* Handle dynamic irq creation and destruction */ 306 410 extern unsigned int create_irq_nr(unsigned int irq_want, int node); 307 411 extern int create_irq(void); 308 412 extern void destroy_irq(unsigned int irq); 309 413 310 - /* Test to see if a driver has successfully requested an irq */ 311 - static inline int irq_has_action(unsigned int irq) 312 - { 313 - struct irq_desc *desc = irq_to_desc(irq); 314 - return desc->action != NULL; 315 - } 316 - 317 - /* Dynamic irq helper functions */ 318 - extern void dynamic_irq_init(unsigned int irq); 319 - void dynamic_irq_init_keep_chip_data(unsigned int irq); 414 + /* 415 + * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and 416 + * irq_free_desc instead. 
417 + */ 320 418 extern void dynamic_irq_cleanup(unsigned int irq); 321 - void dynamic_irq_cleanup_keep_chip_data(unsigned int irq); 419 + static inline void dynamic_irq_init(unsigned int irq) 420 + { 421 + dynamic_irq_cleanup(irq); 422 + } 322 423 323 424 /* Set/get chip/data for an IRQ: */ 324 425 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); ··· 342 411 extern int set_irq_chip_data(unsigned int irq, void *data); 343 412 extern int set_irq_type(unsigned int irq, unsigned int type); 344 413 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); 414 + extern struct irq_data *irq_get_irq_data(unsigned int irq); 345 415 346 - #define get_irq_chip(irq) (irq_to_desc(irq)->chip) 347 - #define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data) 348 - #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) 349 - #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) 416 + static inline struct irq_chip *get_irq_chip(unsigned int irq) 417 + { 418 + struct irq_data *d = irq_get_irq_data(irq); 419 + return d ? d->chip : NULL; 420 + } 350 421 351 - #define get_irq_desc_chip(desc) ((desc)->chip) 352 - #define get_irq_desc_chip_data(desc) ((desc)->chip_data) 353 - #define get_irq_desc_data(desc) ((desc)->handler_data) 354 - #define get_irq_desc_msi(desc) ((desc)->msi_desc) 422 + static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) 423 + { 424 + return d->chip; 425 + } 426 + 427 + static inline void *get_irq_chip_data(unsigned int irq) 428 + { 429 + struct irq_data *d = irq_get_irq_data(irq); 430 + return d ? d->chip_data : NULL; 431 + } 432 + 433 + static inline void *irq_data_get_irq_chip_data(struct irq_data *d) 434 + { 435 + return d->chip_data; 436 + } 437 + 438 + static inline void *get_irq_data(unsigned int irq) 439 + { 440 + struct irq_data *d = irq_get_irq_data(irq); 441 + return d ? d->handler_data : NULL; 442 + } 443 + 444 + static inline void *irq_data_get_irq_data(struct irq_data *d) 445 + { 446 + return d->handler_data; 447 + } 448 + 449 + static inline struct msi_desc *get_irq_msi(unsigned int irq) 450 + { 451 + struct irq_data *d = irq_get_irq_data(irq); 452 + return d ? d->msi_desc : NULL; 453 + } 454 + 455 + static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) 456 + { 457 + return d->msi_desc; 458 + } 459 + 460 + int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); 461 + void irq_free_descs(unsigned int irq, unsigned int cnt); 462 + int irq_reserve_irqs(unsigned int from, unsigned int cnt); 463 + 464 + static inline int irq_alloc_desc(int node) 465 + { 466 + return irq_alloc_descs(-1, 0, 1, node); 467 + } 468 + 469 + static inline int irq_alloc_desc_at(unsigned int at, int node) 470 + { 471 + return irq_alloc_descs(at, at, 1, node); 472 + } 473 + 474 + static inline int irq_alloc_desc_from(unsigned int from, int node) 475 + { 476 + return irq_alloc_descs(-1, from, 1, node); 477 + } 478 + 479 + static inline void irq_free_desc(unsigned int irq) 480 + { 481 + irq_free_descs(irq, 1); 482 + } 355 483 356 484 #endif /* CONFIG_GENERIC_HARDIRQS */ 357 485 358 486 #endif /* !CONFIG_S390 */ 359 - 360 - #ifdef CONFIG_SMP 361 - /** 362 - * alloc_desc_masks - allocate cpumasks for irq_desc 363 - * @desc: pointer to irq_desc struct 364 - * @node: node which will be handling the cpumasks 365 - * @boot: true if need bootmem 366 - * 367 - * Allocates affinity and pending_mask cpumask if required. 368 - * Returns true if successful (or not required). 
369 - */ 370 - static inline bool alloc_desc_masks(struct irq_desc *desc, int node, 371 - bool boot) 372 - { 373 - gfp_t gfp = GFP_ATOMIC; 374 - 375 - if (boot) 376 - gfp = GFP_NOWAIT; 377 - 378 - #ifdef CONFIG_CPUMASK_OFFSTACK 379 - if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) 380 - return false; 381 - 382 - #ifdef CONFIG_GENERIC_PENDING_IRQ 383 - if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { 384 - free_cpumask_var(desc->affinity); 385 - return false; 386 - } 387 - #endif 388 - #endif 389 - return true; 390 - } 391 - 392 - static inline void init_desc_masks(struct irq_desc *desc) 393 - { 394 - cpumask_setall(desc->affinity); 395 - #ifdef CONFIG_GENERIC_PENDING_IRQ 396 - cpumask_clear(desc->pending_mask); 397 - #endif 398 - } 399 - 400 - /** 401 - * init_copy_desc_masks - copy cpumasks for irq_desc 402 - * @old_desc: pointer to old irq_desc struct 403 - * @new_desc: pointer to new irq_desc struct 404 - * 405 - * Insures affinity and pending_masks are copied to new irq_desc. 406 - * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the 407 - * irq_desc struct so the copy is redundant. 408 - */ 409 - 410 - static inline void init_copy_desc_masks(struct irq_desc *old_desc, 411 - struct irq_desc *new_desc) 412 - { 413 - #ifdef CONFIG_CPUMASK_OFFSTACK 414 - cpumask_copy(new_desc->affinity, old_desc->affinity); 415 - 416 - #ifdef CONFIG_GENERIC_PENDING_IRQ 417 - cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); 418 - #endif 419 - #endif 420 - } 421 - 422 - static inline void free_desc_masks(struct irq_desc *old_desc, 423 - struct irq_desc *new_desc) 424 - { 425 - free_cpumask_var(old_desc->affinity); 426 - 427 - #ifdef CONFIG_GENERIC_PENDING_IRQ 428 - free_cpumask_var(old_desc->pending_mask); 429 - #endif 430 - } 431 - 432 - #else /* !CONFIG_SMP */ 433 - 434 - static inline bool alloc_desc_masks(struct irq_desc *desc, int node, 435 - bool boot) 436 - { 437 - return true; 438 - } 439 - 440 - static inline void init_desc_masks(struct irq_desc *desc) 441 - { 442 - } 443 - 444 - static inline void init_copy_desc_masks(struct irq_desc *old_desc, 445 - struct irq_desc *new_desc) 446 - { 447 - } 448 - 449 - static inline void free_desc_masks(struct irq_desc *old_desc, 450 - struct irq_desc *new_desc) 451 - { 452 - } 453 - #endif /* CONFIG_SMP */ 454 487 455 488 #endif /* _LINUX_IRQ_H */
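Taken together, the irq.h rework above means a converted interrupt controller implements only the irq_-prefixed methods and fetches its state through the new accessors. A self-contained sketch of such a chip, where foo_pic, its names and register offsets are invented for illustration:

    #include <linux/io.h>
    #include <linux/irq.h>

    struct foo_pic {
            void __iomem *base;
            unsigned int irq_base;
    };

    static void foo_irq_mask(struct irq_data *d)
    {
            struct foo_pic *pic = irq_data_get_irq_chip_data(d);

            writel(1 << (d->irq - pic->irq_base), pic->base + 0x04);
    }

    static void foo_irq_unmask(struct irq_data *d)
    {
            struct foo_pic *pic = irq_data_get_irq_chip_data(d);

            writel(1 << (d->irq - pic->irq_base), pic->base + 0x08);
    }

    static struct irq_chip foo_irq_chip = {
            .name           = "foo-pic",
            .irq_mask       = foo_irq_mask,
            .irq_unmask     = foo_irq_unmask,
    };

Installed per interrupt with set_irq_chip() and set_irq_chip_data(), both still declared above, the core then invokes these methods with &desc->irq_data.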
+159
include/linux/irqdesc.h
··· 1 + #ifndef _LINUX_IRQDESC_H 2 + #define _LINUX_IRQDESC_H 3 + 4 + /* 5 + * Core internal functions to deal with irq descriptors 6 + * 7 + * This include will move to kernel/irq once we cleaned up the tree. 8 + * For now it's included from <linux/irq.h> 9 + */ 10 + 11 + struct proc_dir_entry; 12 + struct timer_rand_state; 13 + /** 14 + * struct irq_desc - interrupt descriptor 15 + * @irq_data: per irq and chip data passed down to chip functions 16 + * @timer_rand_state: pointer to timer rand state struct 17 + * @kstat_irqs: irq stats per cpu 18 + * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] 19 + * @action: the irq action chain 20 + * @status: status information 21 + * @depth: disable-depth, for nested irq_disable() calls 22 + * @wake_depth: enable depth, for multiple set_irq_wake() callers 23 + * @irq_count: stats field to detect stalled irqs 24 + * @last_unhandled: aging timer for unhandled count 25 + * @irqs_unhandled: stats field for spurious unhandled interrupts 26 + * @lock: locking for SMP 27 + * @pending_mask: pending rebalanced interrupts 28 + * @threads_active: number of irqaction threads currently running 29 + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers 30 + * @dir: /proc/irq/ procfs entry 31 + * @name: flow handler name for /proc/interrupts output 32 + */ 33 + struct irq_desc { 34 + 35 + #ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 36 + struct irq_data irq_data; 37 + #else 38 + /* 39 + * This union will go away, once we fixed the direct access to 40 + * irq_desc all over the place. The direct fields are a 1:1 41 + * overlay of irq_data. 42 + */ 43 + union { 44 + struct irq_data irq_data; 45 + struct { 46 + unsigned int irq; 47 + unsigned int node; 48 + struct irq_chip *chip; 49 + void *handler_data; 50 + void *chip_data; 51 + struct msi_desc *msi_desc; 52 + #ifdef CONFIG_SMP 53 + cpumask_var_t affinity; 54 + #endif 55 + }; 56 + }; 57 + #endif 58 + 59 + struct timer_rand_state *timer_rand_state; 60 + unsigned int *kstat_irqs; 61 + irq_flow_handler_t handle_irq; 62 + struct irqaction *action; /* IRQ action list */ 63 + unsigned int status; /* IRQ status */ 64 + 65 + unsigned int depth; /* nested irq disables */ 66 + unsigned int wake_depth; /* nested wake enables */ 67 + unsigned int irq_count; /* For detecting broken IRQs */ 68 + unsigned long last_unhandled; /* Aging timer for unhandled count */ 69 + unsigned int irqs_unhandled; 70 + raw_spinlock_t lock; 71 + #ifdef CONFIG_SMP 72 + const struct cpumask *affinity_hint; 73 + #ifdef CONFIG_GENERIC_PENDING_IRQ 74 + cpumask_var_t pending_mask; 75 + #endif 76 + #endif 77 + atomic_t threads_active; 78 + wait_queue_head_t wait_for_threads; 79 + #ifdef CONFIG_PROC_FS 80 + struct proc_dir_entry *dir; 81 + #endif 82 + const char *name; 83 + } ____cacheline_internodealigned_in_smp; 84 + 85 + #ifndef CONFIG_SPARSE_IRQ 86 + extern struct irq_desc irq_desc[NR_IRQS]; 87 + #endif 88 + 89 + /* Will be removed once the last users in power and sh are gone */ 90 + extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); 91 + static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) 92 + { 93 + return desc; 94 + } 95 + 96 + #ifdef CONFIG_GENERIC_HARDIRQS 97 + 98 + #define get_irq_desc_chip(desc) ((desc)->irq_data.chip) 99 + #define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) 100 + #define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) 101 + #define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) 102 + 103 + /* 104 + * Monolithic 
do_IRQ implementation. 105 + */ 106 + #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 107 + extern unsigned int __do_IRQ(unsigned int irq); 108 + #endif 109 + 110 + /* 111 + * Architectures call this to let the generic IRQ layer 112 + * handle an interrupt. If the descriptor is attached to an 113 + * irqchip-style controller then we call the ->handle_irq() handler, 114 + * and it calls __do_IRQ() if it's attached to an irqtype-style controller. 115 + */ 116 + static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) 117 + { 118 + #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 119 + desc->handle_irq(irq, desc); 120 + #else 121 + if (likely(desc->handle_irq)) 122 + desc->handle_irq(irq, desc); 123 + else 124 + __do_IRQ(irq); 125 + #endif 126 + } 127 + 128 + static inline void generic_handle_irq(unsigned int irq) 129 + { 130 + generic_handle_irq_desc(irq, irq_to_desc(irq)); 131 + } 132 + 133 + /* Test to see if a driver has successfully requested an irq */ 134 + static inline int irq_has_action(unsigned int irq) 135 + { 136 + struct irq_desc *desc = irq_to_desc(irq); 137 + return desc->action != NULL; 138 + } 139 + 140 + static inline int irq_balancing_disabled(unsigned int irq) 141 + { 142 + struct irq_desc *desc; 143 + 144 + desc = irq_to_desc(irq); 145 + return desc->status & IRQ_NO_BALANCING_MASK; 146 + } 147 + 148 + /* caller has locked the irq_desc and both params are valid */ 149 + static inline void __set_irq_handler_unlocked(int irq, 150 + irq_flow_handler_t handler) 151 + { 152 + struct irq_desc *desc; 153 + 154 + desc = irq_to_desc(irq); 155 + desc->handle_irq = handler; 156 + } 157 + #endif 158 + 159 + #endif
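The union in irqdesc.h is the transition trick: the anonymous struct mirrors struct irq_data field for field, so legacy direct accesses like desc->chip alias desc->irq_data.chip until CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED is set. A sketch that makes the aliasing explicit, valid only while the compat union exists:

    #include <linux/irq.h>

    static void foo_show_alias(unsigned int irq)
    {
            struct irq_desc *desc = irq_to_desc(irq);

    #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
            /* Legacy field and new field name the same storage: */
            WARN_ON(desc->chip != desc->irq_data.chip);
    #endif
    }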
+5
include/linux/irqnr.h
··· 25 25 26 26 extern int nr_irqs; 27 27 extern struct irq_desc *irq_to_desc(unsigned int irq); 28 + unsigned int irq_get_next_irq(unsigned int offset); 28 29 29 30 # define for_each_irq_desc(irq, desc) \ 30 31 for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ ··· 47 46 #else 48 47 #define irq_node(irq) 0 49 48 #endif 49 + 50 + # define for_each_active_irq(irq) \ 51 + for (irq = irq_get_next_irq(0); irq < nr_irqs; \ 52 + irq = irq_get_next_irq(irq + 1)) 50 53 51 54 #endif /* CONFIG_GENERIC_HARDIRQS */ 52 55
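for_each_active_irq() walks only irqs whose descriptor actually exists, via irq_get_next_irq(), instead of probing every number up to nr_irqs, which matters once SPARSE_IRQ leaves holes in the number space. Usage sketch (foo_dump_active is illustrative):

    #include <linux/irq.h>
    #include <linux/kernel.h>

    static void foo_dump_active(void)
    {
            unsigned int irq;

            for_each_active_irq(irq) {
                    if (irq_has_action(irq))
                            pr_info("irq %u has an action\n", irq);
            }
    }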
-8
include/linux/lockdep.h
··· 435 435 436 436 #endif /* CONFIG_LOCKDEP */ 437 437 438 - #ifdef CONFIG_GENERIC_HARDIRQS 439 - extern void early_init_irq_lock_class(void); 440 - #else 441 - static inline void early_init_irq_lock_class(void) 442 - { 443 - } 444 - #endif 445 - 446 438 #ifdef CONFIG_TRACE_IRQFLAGS 447 439 extern void early_boot_irqs_off(void); 448 440 extern void early_boot_irqs_on(void);
+7 -6
include/linux/msi.h
··· 10 10 }; 11 11 12 12 /* Helper functions */ 13 - struct irq_desc; 14 - extern void mask_msi_irq(unsigned int irq); 15 - extern void unmask_msi_irq(unsigned int irq); 16 - extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); 17 - extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); 18 - extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); 13 + struct irq_data; 14 + struct msi_desc; 15 + extern void mask_msi_irq(struct irq_data *data); 16 + extern void unmask_msi_irq(struct irq_data *data); 17 + extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 18 + extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 19 + extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 19 20 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); 20 21 extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 21 22 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
+2
init/Kconfig
··· 339 339 depends on AUDITSYSCALL 340 340 select FSNOTIFY 341 341 342 + source "kernel/irq/Kconfig" 343 + 342 344 menu "RCU Subsystem" 343 345 344 346 choice
-1
init/main.c
··· 556 556 557 557 local_irq_disable(); 558 558 early_boot_irqs_off(); 559 - early_init_irq_lock_class(); 560 559 561 560 /* 562 561 * Interrupts are still disabled. Do necessary setups, then
+53
kernel/irq/Kconfig
··· 1 + config HAVE_GENERIC_HARDIRQS 2 + def_bool n 3 + 4 + if HAVE_GENERIC_HARDIRQS 5 + menu "IRQ subsystem" 6 + # 7 + # Interrupt subsystem related configuration options 8 + # 9 + config GENERIC_HARDIRQS 10 + def_bool y 11 + 12 + config GENERIC_HARDIRQS_NO__DO_IRQ 13 + def_bool y 14 + 15 + # Select this to disable the deprecated stuff 16 + config GENERIC_HARDIRQS_NO_DEPRECATED 17 + def_bool n 18 + 19 + # Options selectable by the architecture code 20 + config HAVE_SPARSE_IRQ 21 + def_bool n 22 + 23 + config GENERIC_IRQ_PROBE 24 + def_bool n 25 + 26 + config GENERIC_PENDING_IRQ 27 + def_bool n 28 + 29 + config AUTO_IRQ_AFFINITY 30 + def_bool n 31 + 32 + config IRQ_PER_CPU 33 + def_bool n 34 + 35 + config HARDIRQS_SW_RESEND 36 + def_bool n 37 + 38 + config SPARSE_IRQ 39 + bool "Support sparse irq numbering" 40 + depends on HAVE_SPARSE_IRQ 41 + ---help--- 42 + 43 + Sparse irq numbering is useful for distro kernels that want 44 + to define a high CONFIG_NR_CPUS value but still want to have 45 + low kernel memory footprint on smaller machines. 46 + 47 + ( Sparse irqs can also be beneficial on NUMA boxes, as they spread 48 + out the interrupt descriptors in a more NUMA-friendly way. ) 49 + 50 + If you don't know what to do here, say N. 51 + 52 + endmenu 53 + endif
+1 -2
kernel/irq/Makefile
··· 1 1 2 - obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o 2 + obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o 3 3 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o 4 4 obj-$(CONFIG_PROC_FS) += proc.o 5 5 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 6 - obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o 7 6 obj-$(CONFIG_PM_SLEEP) += pm.o
+8 -7
kernel/irq/autoprobe.c
··· 57 57 * Some chips need to know about probing in 58 58 * progress: 59 59 */ 60 - if (desc->chip->set_type) 61 - desc->chip->set_type(i, IRQ_TYPE_PROBE); 62 - desc->chip->startup(i); 60 + if (desc->irq_data.chip->irq_set_type) 61 + desc->irq_data.chip->irq_set_type(&desc->irq_data, 62 + IRQ_TYPE_PROBE); 63 + desc->irq_data.chip->irq_startup(&desc->irq_data); 63 64 } 64 65 raw_spin_unlock_irq(&desc->lock); 65 66 } ··· 77 76 raw_spin_lock_irq(&desc->lock); 78 77 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 79 78 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 80 - if (desc->chip->startup(i)) 79 + if (desc->irq_data.chip->irq_startup(&desc->irq_data)) 81 80 desc->status |= IRQ_PENDING; 82 81 } 83 82 raw_spin_unlock_irq(&desc->lock); ··· 99 98 /* It triggered already - consider it spurious. */ 100 99 if (!(status & IRQ_WAITING)) { 101 100 desc->status = status & ~IRQ_AUTODETECT; 102 - desc->chip->shutdown(i); 101 + desc->irq_data.chip->irq_shutdown(&desc->irq_data); 103 102 } else 104 103 if (i < 32) 105 104 mask |= 1 << i; ··· 138 137 mask |= 1 << i; 139 138 140 139 desc->status = status & ~IRQ_AUTODETECT; 141 - desc->chip->shutdown(i); 140 + desc->irq_data.chip->irq_shutdown(&desc->irq_data); 142 141 } 143 142 raw_spin_unlock_irq(&desc->lock); 144 143 } ··· 182 181 nr_of_irqs++; 183 182 } 184 183 desc->status = status & ~IRQ_AUTODETECT; 185 - desc->chip->shutdown(i); 184 + desc->irq_data.chip->irq_shutdown(&desc->irq_data); 186 185 } 187 186 raw_spin_unlock_irq(&desc->lock); 188 187 }
+198 -182
kernel/irq/chip.c
··· 18 18 19 19 #include "internals.h" 20 20 21 - static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data) 22 - { 23 - struct irq_desc *desc; 24 - unsigned long flags; 25 - 26 - desc = irq_to_desc(irq); 27 - if (!desc) { 28 - WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); 29 - return; 30 - } 31 - 32 - /* Ensure we don't have left over values from a previous use of this irq */ 33 - raw_spin_lock_irqsave(&desc->lock, flags); 34 - desc->status = IRQ_DISABLED; 35 - desc->chip = &no_irq_chip; 36 - desc->handle_irq = handle_bad_irq; 37 - desc->depth = 1; 38 - desc->msi_desc = NULL; 39 - desc->handler_data = NULL; 40 - if (!keep_chip_data) 41 - desc->chip_data = NULL; 42 - desc->action = NULL; 43 - desc->irq_count = 0; 44 - desc->irqs_unhandled = 0; 45 - #ifdef CONFIG_SMP 46 - cpumask_setall(desc->affinity); 47 - #ifdef CONFIG_GENERIC_PENDING_IRQ 48 - cpumask_clear(desc->pending_mask); 49 - #endif 50 - #endif 51 - raw_spin_unlock_irqrestore(&desc->lock, flags); 52 - } 53 - 54 - /** 55 - * dynamic_irq_init - initialize a dynamically allocated irq 56 - * @irq: irq number to initialize 57 - */ 58 - void dynamic_irq_init(unsigned int irq) 59 - { 60 - dynamic_irq_init_x(irq, false); 61 - } 62 - 63 - /** 64 - * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq 65 - * @irq: irq number to initialize 66 - * 67 - * does not set irq_to_desc(irq)->chip_data to NULL 68 - */ 69 - void dynamic_irq_init_keep_chip_data(unsigned int irq) 70 - { 71 - dynamic_irq_init_x(irq, true); 72 - } 73 - 74 - static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data) 75 - { 76 - struct irq_desc *desc = irq_to_desc(irq); 77 - unsigned long flags; 78 - 79 - if (!desc) { 80 - WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); 81 - return; 82 - } 83 - 84 - raw_spin_lock_irqsave(&desc->lock, flags); 85 - if (desc->action) { 86 - raw_spin_unlock_irqrestore(&desc->lock, flags); 87 - WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", 88 - irq); 89 - return; 90 - } 91 - desc->msi_desc = NULL; 92 - desc->handler_data = NULL; 93 - if (!keep_chip_data) 94 - desc->chip_data = NULL; 95 - desc->handle_irq = handle_bad_irq; 96 - desc->chip = &no_irq_chip; 97 - desc->name = NULL; 98 - clear_kstat_irqs(desc); 99 - raw_spin_unlock_irqrestore(&desc->lock, flags); 100 - } 101 - 102 - /** 103 - * dynamic_irq_cleanup - cleanup a dynamically allocated irq 104 - * @irq: irq number to initialize 105 - */ 106 - void dynamic_irq_cleanup(unsigned int irq) 107 - { 108 - dynamic_irq_cleanup_x(irq, false); 109 - } 110 - 111 - /** 112 - * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq 113 - * @irq: irq number to initialize 114 - * 115 - * does not set irq_to_desc(irq)->chip_data to NULL 116 - */ 117 - void dynamic_irq_cleanup_keep_chip_data(unsigned int irq) 118 - { 119 - dynamic_irq_cleanup_x(irq, true); 120 - } 121 - 122 - 123 21 /** 124 22 * set_irq_chip - set the irq chip for an irq 125 23 * @irq: irq number ··· 38 140 39 141 raw_spin_lock_irqsave(&desc->lock, flags); 40 142 irq_chip_set_defaults(chip); 41 - desc->chip = chip; 143 + desc->irq_data.chip = chip; 42 144 raw_spin_unlock_irqrestore(&desc->lock, flags); 43 145 44 146 return 0; ··· 91 193 } 92 194 93 195 raw_spin_lock_irqsave(&desc->lock, flags); 94 - desc->handler_data = data; 196 + desc->irq_data.handler_data = data; 95 197 raw_spin_unlock_irqrestore(&desc->lock, flags); 96 198 return 0; 97 199 } ··· 116 218 } 117 219 118 220 raw_spin_lock_irqsave(&desc->lock, flags); 119 - 
desc->msi_desc = entry; 221 + desc->irq_data.msi_desc = entry; 120 222 if (entry) 121 223 entry->irq = irq; 122 224 raw_spin_unlock_irqrestore(&desc->lock, flags); ··· 141 243 return -EINVAL; 142 244 } 143 245 144 - if (!desc->chip) { 246 + if (!desc->irq_data.chip) { 145 247 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); 146 248 return -EINVAL; 147 249 } 148 250 149 251 raw_spin_lock_irqsave(&desc->lock, flags); 150 - desc->chip_data = data; 252 + desc->irq_data.chip_data = data; 151 253 raw_spin_unlock_irqrestore(&desc->lock, flags); 152 254 153 255 return 0; 154 256 } 155 257 EXPORT_SYMBOL(set_irq_chip_data); 258 + 259 + struct irq_data *irq_get_irq_data(unsigned int irq) 260 + { 261 + struct irq_desc *desc = irq_to_desc(irq); 262 + 263 + return desc ? &desc->irq_data : NULL; 264 + } 265 + EXPORT_SYMBOL_GPL(irq_get_irq_data); 156 266 157 267 /** 158 268 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq ··· 193 287 /* 194 288 * default enable function 195 289 */ 196 - static void default_enable(unsigned int irq) 290 + static void default_enable(struct irq_data *data) 197 291 { 198 - struct irq_desc *desc = irq_to_desc(irq); 292 + struct irq_desc *desc = irq_data_to_desc(data); 199 293 200 - desc->chip->unmask(irq); 294 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 201 295 desc->status &= ~IRQ_MASKED; 202 296 } 203 297 204 298 /* 205 299 * default disable function 206 300 */ 207 - static void default_disable(unsigned int irq) 301 + static void default_disable(struct irq_data *data) 208 302 { 209 303 } 210 304 211 305 /* 212 306 * default startup function 213 307 */ 214 - static unsigned int default_startup(unsigned int irq) 308 + static unsigned int default_startup(struct irq_data *data) 215 309 { 216 - struct irq_desc *desc = irq_to_desc(irq); 310 + struct irq_desc *desc = irq_data_to_desc(data); 217 311 218 - desc->chip->enable(irq); 312 + desc->irq_data.chip->irq_enable(data); 219 313 return 0; 220 314 } 221 315 222 316 /* 223 317 * default shutdown function 224 318 */ 225 - static void default_shutdown(unsigned int irq) 319 + static void default_shutdown(struct irq_data *data) 226 320 { 227 - struct irq_desc *desc = irq_to_desc(irq); 321 + struct irq_desc *desc = irq_data_to_desc(data); 228 322 229 - desc->chip->mask(irq); 323 + desc->irq_data.chip->irq_mask(&desc->irq_data); 230 324 desc->status |= IRQ_MASKED; 231 325 } 326 + 327 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 328 + /* Temporary migration helpers */ 329 + static void compat_irq_mask(struct irq_data *data) 330 + { 331 + data->chip->mask(data->irq); 332 + } 333 + 334 + static void compat_irq_unmask(struct irq_data *data) 335 + { 336 + data->chip->unmask(data->irq); 337 + } 338 + 339 + static void compat_irq_ack(struct irq_data *data) 340 + { 341 + data->chip->ack(data->irq); 342 + } 343 + 344 + static void compat_irq_mask_ack(struct irq_data *data) 345 + { 346 + data->chip->mask_ack(data->irq); 347 + } 348 + 349 + static void compat_irq_eoi(struct irq_data *data) 350 + { 351 + data->chip->eoi(data->irq); 352 + } 353 + 354 + static void compat_irq_enable(struct irq_data *data) 355 + { 356 + data->chip->enable(data->irq); 357 + } 358 + 359 + static void compat_irq_disable(struct irq_data *data) 360 + { 361 + data->chip->disable(data->irq); 362 + } 363 + 364 + static void compat_irq_shutdown(struct irq_data *data) 365 + { 366 + data->chip->shutdown(data->irq); 367 + } 368 + 369 + static unsigned int compat_irq_startup(struct irq_data *data) 370 + { 371 + return 
data->chip->startup(data->irq); 372 + } 373 + 374 + static int compat_irq_set_affinity(struct irq_data *data, 375 + const struct cpumask *dest, bool force) 376 + { 377 + return data->chip->set_affinity(data->irq, dest); 378 + } 379 + 380 + static int compat_irq_set_type(struct irq_data *data, unsigned int type) 381 + { 382 + return data->chip->set_type(data->irq, type); 383 + } 384 + 385 + static int compat_irq_set_wake(struct irq_data *data, unsigned int on) 386 + { 387 + return data->chip->set_wake(data->irq, on); 388 + } 389 + 390 + static int compat_irq_retrigger(struct irq_data *data) 391 + { 392 + return data->chip->retrigger(data->irq); 393 + } 394 + 395 + static void compat_bus_lock(struct irq_data *data) 396 + { 397 + data->chip->bus_lock(data->irq); 398 + } 399 + 400 + static void compat_bus_sync_unlock(struct irq_data *data) 401 + { 402 + data->chip->bus_sync_unlock(data->irq); 403 + } 404 + #endif 232 405 233 406 /* 234 407 * Fixup enable/disable function pointers 235 408 */ 236 409 void irq_chip_set_defaults(struct irq_chip *chip) 237 410 { 238 - if (!chip->enable) 239 - chip->enable = default_enable; 240 - if (!chip->disable) 241 - chip->disable = default_disable; 242 - if (!chip->startup) 243 - chip->startup = default_startup; 411 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 244 412 /* 245 - * We use chip->disable, when the user provided its own. When 246 - * we have default_disable set for chip->disable, then we need 413 + * Compat fixup functions need to be before we set the 414 + * defaults for enable/disable/startup/shutdown 415 + */ 416 + if (chip->enable) 417 + chip->irq_enable = compat_irq_enable; 418 + if (chip->disable) 419 + chip->irq_disable = compat_irq_disable; 420 + if (chip->shutdown) 421 + chip->irq_shutdown = compat_irq_shutdown; 422 + if (chip->startup) 423 + chip->irq_startup = compat_irq_startup; 424 + #endif 425 + /* 426 + * The real defaults 427 + */ 428 + if (!chip->irq_enable) 429 + chip->irq_enable = default_enable; 430 + if (!chip->irq_disable) 431 + chip->irq_disable = default_disable; 432 + if (!chip->irq_startup) 433 + chip->irq_startup = default_startup; 434 + /* 435 + * We use chip->irq_disable, when the user provided its own. When 436 + * we have default_disable set for chip->irq_disable, then we need 247 437 * to use default_shutdown, otherwise the irq line is not 248 438 * disabled on free_irq(): 249 439 */ 250 - if (!chip->shutdown) 251 - chip->shutdown = chip->disable != default_disable ? 252 - chip->disable : default_shutdown; 253 - if (!chip->name) 254 - chip->name = chip->typename; 440 + if (!chip->irq_shutdown) 441 + chip->irq_shutdown = chip->irq_disable != default_disable ? 
442 + chip->irq_disable : default_shutdown; 443 + 444 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 255 445 if (!chip->end) 256 446 chip->end = dummy_irq_chip.end; 447 + 448 + /* 449 + * Now fix up the remaining compat handlers 450 + */ 451 + if (chip->bus_lock) 452 + chip->irq_bus_lock = compat_bus_lock; 453 + if (chip->bus_sync_unlock) 454 + chip->irq_bus_sync_unlock = compat_bus_sync_unlock; 455 + if (chip->mask) 456 + chip->irq_mask = compat_irq_mask; 457 + if (chip->unmask) 458 + chip->irq_unmask = compat_irq_unmask; 459 + if (chip->ack) 460 + chip->irq_ack = compat_irq_ack; 461 + if (chip->mask_ack) 462 + chip->irq_mask_ack = compat_irq_mask_ack; 463 + if (chip->eoi) 464 + chip->irq_eoi = compat_irq_eoi; 465 + if (chip->set_affinity) 466 + chip->irq_set_affinity = compat_irq_set_affinity; 467 + if (chip->set_type) 468 + chip->irq_set_type = compat_irq_set_type; 469 + if (chip->set_wake) 470 + chip->irq_set_wake = compat_irq_set_wake; 471 + if (chip->retrigger) 472 + chip->irq_retrigger = compat_irq_retrigger; 473 + #endif 257 474 } 258 475 259 - static inline void mask_ack_irq(struct irq_desc *desc, int irq) 476 + static inline void mask_ack_irq(struct irq_desc *desc) 260 477 { 261 - if (desc->chip->mask_ack) 262 - desc->chip->mask_ack(irq); 478 + if (desc->irq_data.chip->irq_mask_ack) 479 + desc->irq_data.chip->irq_mask_ack(&desc->irq_data); 263 480 else { 264 - desc->chip->mask(irq); 265 - if (desc->chip->ack) 266 - desc->chip->ack(irq); 481 + desc->irq_data.chip->irq_mask(&desc->irq_data); 482 + if (desc->irq_data.chip->irq_ack) 483 + desc->irq_data.chip->irq_ack(&desc->irq_data); 267 484 } 268 485 desc->status |= IRQ_MASKED; 269 486 } 270 487 271 - static inline void mask_irq(struct irq_desc *desc, int irq) 488 + static inline void mask_irq(struct irq_desc *desc) 272 489 { 273 - if (desc->chip->mask) { 274 - desc->chip->mask(irq); 490 + if (desc->irq_data.chip->irq_mask) { 491 + desc->irq_data.chip->irq_mask(&desc->irq_data); 275 492 desc->status |= IRQ_MASKED; 276 493 } 277 494 } 278 495 279 - static inline void unmask_irq(struct irq_desc *desc, int irq) 496 + static inline void unmask_irq(struct irq_desc *desc) 280 497 { 281 - if (desc->chip->unmask) { 282 - desc->chip->unmask(irq); 498 + if (desc->irq_data.chip->irq_unmask) { 499 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 283 500 desc->status &= ~IRQ_MASKED; 284 501 } 285 502 } ··· 505 476 irqreturn_t action_ret; 506 477 507 478 raw_spin_lock(&desc->lock); 508 - mask_ack_irq(desc, irq); 479 + mask_ack_irq(desc); 509 480 510 481 if (unlikely(desc->status & IRQ_INPROGRESS)) 511 482 goto out_unlock; ··· 531 502 desc->status &= ~IRQ_INPROGRESS; 532 503 533 504 if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) 534 - unmask_irq(desc, irq); 505 + unmask_irq(desc); 535 506 out_unlock: 536 507 raw_spin_unlock(&desc->lock); 537 508 } ··· 568 539 action = desc->action; 569 540 if (unlikely(!action || (desc->status & IRQ_DISABLED))) { 570 541 desc->status |= IRQ_PENDING; 571 - mask_irq(desc, irq); 542 + mask_irq(desc); 572 543 goto out; 573 544 } 574 545 ··· 583 554 raw_spin_lock(&desc->lock); 584 555 desc->status &= ~IRQ_INPROGRESS; 585 556 out: 586 - desc->chip->eoi(irq); 557 + desc->irq_data.chip->irq_eoi(&desc->irq_data); 587 558 588 559 raw_spin_unlock(&desc->lock); 589 560 } ··· 619 590 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || 620 591 !desc->action)) { 621 592 desc->status |= (IRQ_PENDING | IRQ_MASKED); 622 - mask_ack_irq(desc, irq); 593 + mask_ack_irq(desc); 623 594 goto out_unlock; 624 595 } 
625 596 kstat_incr_irqs_this_cpu(irq, desc); 626 597 627 598 /* Start handling the irq */ 628 - if (desc->chip->ack) 629 - desc->chip->ack(irq); 599 + desc->irq_data.chip->irq_ack(&desc->irq_data); 630 600 631 601 /* Mark the IRQ currently in progress.*/ 632 602 desc->status |= IRQ_INPROGRESS; ··· 635 607 irqreturn_t action_ret; 636 608 637 609 if (unlikely(!action)) { 638 - mask_irq(desc, irq); 610 + mask_irq(desc); 639 611 goto out_unlock; 640 612 } 641 613 ··· 647 619 if (unlikely((desc->status & 648 620 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == 649 621 (IRQ_PENDING | IRQ_MASKED))) { 650 - unmask_irq(desc, irq); 622 + unmask_irq(desc); 651 623 } 652 624 653 625 desc->status &= ~IRQ_PENDING; ··· 678 650 679 651 kstat_incr_irqs_this_cpu(irq, desc); 680 652 681 - if (desc->chip->ack) 682 - desc->chip->ack(irq); 653 + if (desc->irq_data.chip->irq_ack) 654 + desc->irq_data.chip->irq_ack(&desc->irq_data); 683 655 684 656 action_ret = handle_IRQ_event(irq, desc->action); 685 657 if (!noirqdebug) 686 658 note_interrupt(irq, desc, action_ret); 687 659 688 - if (desc->chip->eoi) 689 - desc->chip->eoi(irq); 660 + if (desc->irq_data.chip->irq_eoi) 661 + desc->irq_data.chip->irq_eoi(&desc->irq_data); 690 662 } 691 663 692 664 void ··· 704 676 705 677 if (!handle) 706 678 handle = handle_bad_irq; 707 - else if (desc->chip == &no_irq_chip) { 679 + else if (desc->irq_data.chip == &no_irq_chip) { 708 680 printk(KERN_WARNING "Trying to install %sinterrupt handler " 709 681 "for IRQ%d\n", is_chained ? "chained " : "", irq); 710 682 /* ··· 714 686 * prevent us to setup the interrupt at all. Switch it to 715 687 * dummy_irq_chip for easy transition. 716 688 */ 717 - desc->chip = &dummy_irq_chip; 689 + desc->irq_data.chip = &dummy_irq_chip; 718 690 } 719 691 720 - chip_bus_lock(irq, desc); 692 + chip_bus_lock(desc); 721 693 raw_spin_lock_irqsave(&desc->lock, flags); 722 694 723 695 /* Uninstall? 
*/ 724 696 if (handle == handle_bad_irq) { 725 - if (desc->chip != &no_irq_chip) 726 - mask_ack_irq(desc, irq); 697 + if (desc->irq_data.chip != &no_irq_chip) 698 + mask_ack_irq(desc); 727 699 desc->status |= IRQ_DISABLED; 728 700 desc->depth = 1; 729 701 } ··· 734 706 desc->status &= ~IRQ_DISABLED; 735 707 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 736 708 desc->depth = 0; 737 - desc->chip->startup(irq); 709 + desc->irq_data.chip->irq_startup(&desc->irq_data); 738 710 } 739 711 raw_spin_unlock_irqrestore(&desc->lock, flags); 740 - chip_bus_sync_unlock(irq, desc); 712 + chip_bus_sync_unlock(desc); 741 713 } 742 714 EXPORT_SYMBOL_GPL(__set_irq_handler); 743 715 ··· 757 729 __set_irq_handler(irq, handle, 0, name); 758 730 } 759 731 760 - void set_irq_noprobe(unsigned int irq) 732 + void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) 761 733 { 762 734 struct irq_desc *desc = irq_to_desc(irq); 763 735 unsigned long flags; 764 736 765 - if (!desc) { 766 - printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); 737 + if (!desc) 767 738 return; 768 - } 739 + 740 + /* Sanitize flags */ 741 + set &= IRQF_MODIFY_MASK; 742 + clr &= IRQF_MODIFY_MASK; 769 743 770 744 raw_spin_lock_irqsave(&desc->lock, flags); 771 - desc->status |= IRQ_NOPROBE; 772 - raw_spin_unlock_irqrestore(&desc->lock, flags); 773 - } 774 - 775 - void set_irq_probe(unsigned int irq) 776 - { 777 - struct irq_desc *desc = irq_to_desc(irq); 778 - unsigned long flags; 779 - 780 - if (!desc) { 781 - printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); 782 - return; 783 - } 784 - 785 - raw_spin_lock_irqsave(&desc->lock, flags); 786 - desc->status &= ~IRQ_NOPROBE; 745 + desc->status &= ~clr; 746 + desc->status |= set; 787 747 raw_spin_unlock_irqrestore(&desc->lock, flags); 788 748 }
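The bulk of the kernel/irq/chip.c changes above convert the irq_chip callbacks from the old unsigned int irq signatures to the new struct irq_data based ones, wrapping unconverted chips with the compat_irq_*() helpers until CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED can be enforced everywhere. As a minimal sketch (not part of this commit; the foo_* names, register offsets and chip_data layout are invented), a chip written directly against the new interface looks like:

static void foo_irq_mask(struct irq_data *data)
{
	/* chip_data is assumed to have been set up via set_irq_chip_data() */
	struct foo_chip *foo = data->chip_data;

	writel(BIT(data->irq - foo->irq_base), foo->base + FOO_MASK_SET);
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_chip *foo = data->chip_data;

	writel(BIT(data->irq - foo->irq_base), foo->base + FOO_MASK_CLR);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

Callbacks the chip leaves NULL (irq_enable, irq_disable, irq_startup, irq_shutdown) are filled in by irq_chip_set_defaults() above.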
+68
kernel/irq/dummychip.c
··· 1 + /* 2 + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 3 + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King 4 + * 5 + * This file contains the dummy interrupt chip implementation 6 + */ 7 + #include <linux/interrupt.h> 8 + #include <linux/irq.h> 9 + 10 + #include "internals.h" 11 + 12 + /* 13 + * What should we do if we get a hw irq event on an illegal vector? 14 + * Each architecture has to answer this itself. 15 + */ 16 + static void ack_bad(struct irq_data *data) 17 + { 18 + struct irq_desc *desc = irq_data_to_desc(data); 19 + 20 + print_irq_desc(data->irq, desc); 21 + ack_bad_irq(data->irq); 22 + } 23 + 24 + /* 25 + * NOP functions 26 + */ 27 + static void noop(struct irq_data *data) { } 28 + 29 + static unsigned int noop_ret(struct irq_data *data) 30 + { 31 + return 0; 32 + } 33 + 34 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 35 + static void compat_noop(unsigned int irq) { } 36 + #define END_INIT .end = compat_noop 37 + #else 38 + #define END_INIT 39 + #endif 40 + 41 + /* 42 + * Generic no controller implementation 43 + */ 44 + struct irq_chip no_irq_chip = { 45 + .name = "none", 46 + .irq_startup = noop_ret, 47 + .irq_shutdown = noop, 48 + .irq_enable = noop, 49 + .irq_disable = noop, 50 + .irq_ack = ack_bad, 51 + END_INIT 52 + }; 53 + 54 + /* 55 + * Generic dummy implementation which can be used for 56 + * real dumb interrupt sources 57 + */ 58 + struct irq_chip dummy_irq_chip = { 59 + .name = "dummy", 60 + .irq_startup = noop_ret, 61 + .irq_shutdown = noop, 62 + .irq_enable = noop, 63 + .irq_disable = noop, 64 + .irq_ack = noop, 65 + .irq_mask = noop, 66 + .irq_unmask = noop, 67 + END_INIT 68 + };
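dummy_irq_chip is what a chained demultiplexing handler typically installs for its child interrupts, where there is no per-line hardware to mask or ack. A hypothetical board-code usage (illustrative, not from this commit):

	set_irq_chip_and_handler(child_irq, &dummy_irq_chip,
				 handle_simple_irq);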
+8 -333
kernel/irq/handle.c
··· 11 11 */ 12 12 13 13 #include <linux/irq.h> 14 - #include <linux/sched.h> 15 - #include <linux/slab.h> 16 - #include <linux/module.h> 17 14 #include <linux/random.h> 15 + #include <linux/sched.h> 18 16 #include <linux/interrupt.h> 19 17 #include <linux/kernel_stat.h> 20 - #include <linux/rculist.h> 21 - #include <linux/hash.h> 22 - #include <linux/radix-tree.h> 18 + 23 19 #include <trace/events/irq.h> 24 20 25 21 #include "internals.h" 26 - 27 - /* 28 - * lockdep: we want to handle all irq_desc locks as a single lock-class: 29 - */ 30 - struct lock_class_key irq_desc_lock_class; 31 22 32 23 /** 33 24 * handle_bad_irq - handle spurious and unhandled irqs ··· 33 42 kstat_incr_irqs_this_cpu(irq, desc); 34 43 ack_bad_irq(irq); 35 44 } 36 - 37 - #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 38 - static void __init init_irq_default_affinity(void) 39 - { 40 - alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 41 - cpumask_setall(irq_default_affinity); 42 - } 43 - #else 44 - static void __init init_irq_default_affinity(void) 45 - { 46 - } 47 - #endif 48 - 49 - /* 50 - * Linux has a controller-independent interrupt architecture. 51 - * Every controller has a 'controller-template', that is used 52 - * by the main code to do the right thing. Each driver-visible 53 - * interrupt source is transparently wired to the appropriate 54 - * controller. Thus drivers need not be aware of the 55 - * interrupt-controller. 56 - * 57 - * The code is designed to be easily extended with new/different 58 - * interrupt controllers, without having to do assembly magic or 59 - * having to touch the generic code. 60 - * 61 - * Controller mappings for all interrupt sources: 62 - */ 63 - int nr_irqs = NR_IRQS; 64 - EXPORT_SYMBOL_GPL(nr_irqs); 65 - 66 - #ifdef CONFIG_SPARSE_IRQ 67 - 68 - static struct irq_desc irq_desc_init = { 69 - .irq = -1, 70 - .status = IRQ_DISABLED, 71 - .chip = &no_irq_chip, 72 - .handle_irq = handle_bad_irq, 73 - .depth = 1, 74 - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 75 - }; 76 - 77 - void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) 78 - { 79 - void *ptr; 80 - 81 - ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), 82 - GFP_ATOMIC, node); 83 - 84 - /* 85 - * don't overwite if can not get new one 86 - * init_copy_kstat_irqs() could still use old one 87 - */ 88 - if (ptr) { 89 - printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); 90 - desc->kstat_irqs = ptr; 91 - } 92 - } 93 - 94 - static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) 95 - { 96 - memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); 97 - 98 - raw_spin_lock_init(&desc->lock); 99 - desc->irq = irq; 100 - #ifdef CONFIG_SMP 101 - desc->node = node; 102 - #endif 103 - lockdep_set_class(&desc->lock, &irq_desc_lock_class); 104 - init_kstat_irqs(desc, node, nr_cpu_ids); 105 - if (!desc->kstat_irqs) { 106 - printk(KERN_ERR "can not alloc kstat_irqs\n"); 107 - BUG_ON(1); 108 - } 109 - if (!alloc_desc_masks(desc, node, false)) { 110 - printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); 111 - BUG_ON(1); 112 - } 113 - init_desc_masks(desc); 114 - arch_init_chip_data(desc, node); 115 - } 116 - 117 - /* 118 - * Protect the sparse_irqs: 119 - */ 120 - DEFINE_RAW_SPINLOCK(sparse_irq_lock); 121 - 122 - static RADIX_TREE(irq_desc_tree, GFP_ATOMIC); 123 - 124 - static void set_irq_desc(unsigned int irq, struct irq_desc *desc) 125 - { 126 - radix_tree_insert(&irq_desc_tree, irq, desc); 127 - } 128 - 129 - struct irq_desc *irq_to_desc(unsigned int irq) 130 - { 131 - return 
radix_tree_lookup(&irq_desc_tree, irq); 132 - } 133 - 134 - void replace_irq_desc(unsigned int irq, struct irq_desc *desc) 135 - { 136 - void **ptr; 137 - 138 - ptr = radix_tree_lookup_slot(&irq_desc_tree, irq); 139 - if (ptr) 140 - radix_tree_replace_slot(ptr, desc); 141 - } 142 - 143 - static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { 144 - [0 ... NR_IRQS_LEGACY-1] = { 145 - .irq = -1, 146 - .status = IRQ_DISABLED, 147 - .chip = &no_irq_chip, 148 - .handle_irq = handle_bad_irq, 149 - .depth = 1, 150 - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 151 - } 152 - }; 153 - 154 - static unsigned int *kstat_irqs_legacy; 155 - 156 - int __init early_irq_init(void) 157 - { 158 - struct irq_desc *desc; 159 - int legacy_count; 160 - int node; 161 - int i; 162 - 163 - init_irq_default_affinity(); 164 - 165 - /* initialize nr_irqs based on nr_cpu_ids */ 166 - arch_probe_nr_irqs(); 167 - printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); 168 - 169 - desc = irq_desc_legacy; 170 - legacy_count = ARRAY_SIZE(irq_desc_legacy); 171 - node = first_online_node; 172 - 173 - /* allocate based on nr_cpu_ids */ 174 - kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * 175 - sizeof(int), GFP_NOWAIT, node); 176 - 177 - for (i = 0; i < legacy_count; i++) { 178 - desc[i].irq = i; 179 - #ifdef CONFIG_SMP 180 - desc[i].node = node; 181 - #endif 182 - desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; 183 - lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 184 - alloc_desc_masks(&desc[i], node, true); 185 - init_desc_masks(&desc[i]); 186 - set_irq_desc(i, &desc[i]); 187 - } 188 - 189 - return arch_early_irq_init(); 190 - } 191 - 192 - struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) 193 - { 194 - struct irq_desc *desc; 195 - unsigned long flags; 196 - 197 - if (irq >= nr_irqs) { 198 - WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", 199 - irq, nr_irqs); 200 - return NULL; 201 - } 202 - 203 - desc = irq_to_desc(irq); 204 - if (desc) 205 - return desc; 206 - 207 - raw_spin_lock_irqsave(&sparse_irq_lock, flags); 208 - 209 - /* We have to check it to avoid races with another CPU */ 210 - desc = irq_to_desc(irq); 211 - if (desc) 212 - goto out_unlock; 213 - 214 - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); 215 - 216 - printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); 217 - if (!desc) { 218 - printk(KERN_ERR "can not alloc irq_desc\n"); 219 - BUG_ON(1); 220 - } 221 - init_one_irq_desc(irq, desc, node); 222 - 223 - set_irq_desc(irq, desc); 224 - 225 - out_unlock: 226 - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 227 - 228 - return desc; 229 - } 230 - 231 - #else /* !CONFIG_SPARSE_IRQ */ 232 - 233 - struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 234 - [0 ... 
NR_IRQS-1] = { 235 - .status = IRQ_DISABLED, 236 - .chip = &no_irq_chip, 237 - .handle_irq = handle_bad_irq, 238 - .depth = 1, 239 - .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), 240 - } 241 - }; 242 - 243 - static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; 244 - int __init early_irq_init(void) 245 - { 246 - struct irq_desc *desc; 247 - int count; 248 - int i; 249 - 250 - init_irq_default_affinity(); 251 - 252 - printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); 253 - 254 - desc = irq_desc; 255 - count = ARRAY_SIZE(irq_desc); 256 - 257 - for (i = 0; i < count; i++) { 258 - desc[i].irq = i; 259 - alloc_desc_masks(&desc[i], 0, true); 260 - init_desc_masks(&desc[i]); 261 - desc[i].kstat_irqs = kstat_irqs_all[i]; 262 - } 263 - return arch_early_irq_init(); 264 - } 265 - 266 - struct irq_desc *irq_to_desc(unsigned int irq) 267 - { 268 - return (irq < NR_IRQS) ? irq_desc + irq : NULL; 269 - } 270 - 271 - struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) 272 - { 273 - return irq_to_desc(irq); 274 - } 275 - #endif /* !CONFIG_SPARSE_IRQ */ 276 - 277 - void clear_kstat_irqs(struct irq_desc *desc) 278 - { 279 - memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); 280 - } 281 - 282 - /* 283 - * What should we do if we get a hw irq event on an illegal vector? 284 - * Each architecture has to answer this themself. 285 - */ 286 - static void ack_bad(unsigned int irq) 287 - { 288 - struct irq_desc *desc = irq_to_desc(irq); 289 - 290 - print_irq_desc(irq, desc); 291 - ack_bad_irq(irq); 292 - } 293 - 294 - /* 295 - * NOP functions 296 - */ 297 - static void noop(unsigned int irq) 298 - { 299 - } 300 - 301 - static unsigned int noop_ret(unsigned int irq) 302 - { 303 - return 0; 304 - } 305 - 306 - /* 307 - * Generic no controller implementation 308 - */ 309 - struct irq_chip no_irq_chip = { 310 - .name = "none", 311 - .startup = noop_ret, 312 - .shutdown = noop, 313 - .enable = noop, 314 - .disable = noop, 315 - .ack = ack_bad, 316 - .end = noop, 317 - }; 318 - 319 - /* 320 - * Generic dummy implementation which can be used for 321 - * real dumb interrupt sources 322 - */ 323 - struct irq_chip dummy_irq_chip = { 324 - .name = "dummy", 325 - .startup = noop_ret, 326 - .shutdown = noop, 327 - .enable = noop, 328 - .disable = noop, 329 - .ack = noop, 330 - .mask = noop, 331 - .unmask = noop, 332 - .end = noop, 333 - }; 334 45 335 46 /* 336 47 * Special, empty irq handler: ··· 150 457 /* 151 458 * No locking required for CPU-local interrupts: 152 459 */ 153 - if (desc->chip->ack) 154 - desc->chip->ack(irq); 460 + if (desc->irq_data.chip->ack) 461 + desc->irq_data.chip->ack(irq); 155 462 if (likely(!(desc->status & IRQ_DISABLED))) { 156 463 action_ret = handle_IRQ_event(irq, desc->action); 157 464 if (!noirqdebug) 158 465 note_interrupt(irq, desc, action_ret); 159 466 } 160 - desc->chip->end(irq); 467 + desc->irq_data.chip->end(irq); 161 468 return 1; 162 469 } 163 470 164 471 raw_spin_lock(&desc->lock); 165 - if (desc->chip->ack) 166 - desc->chip->ack(irq); 472 + if (desc->irq_data.chip->ack) 473 + desc->irq_data.chip->ack(irq); 167 474 /* 168 475 * REPLAY is when Linux resends an IRQ that was dropped earlier 169 476 * WAITING is used by probe to mark irqs that are being tested ··· 223 530 * The ->end() handler has to deal with interrupts which got 224 531 * disabled while the handler was running. 
225 532 */ 226 - desc->chip->end(irq); 533 + desc->irq_data.chip->end(irq); 227 534 raw_spin_unlock(&desc->lock); 228 535 229 536 return 1; 230 537 } 231 538 #endif 232 - 233 - void early_init_irq_lock_class(void) 234 - { 235 - struct irq_desc *desc; 236 - int i; 237 - 238 - for_each_irq_desc(i, desc) { 239 - lockdep_set_class(&desc->lock, &irq_desc_lock_class); 240 - } 241 - } 242 - 243 - unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 244 - { 245 - struct irq_desc *desc = irq_to_desc(irq); 246 - return desc ? desc->kstat_irqs[cpu] : 0; 247 - } 248 - EXPORT_SYMBOL(kstat_irqs_cpu); 249 -
+26 -15
kernel/irq/internals.h
··· 1 1 /* 2 2 * IRQ subsystem internal functions and variables: 3 3 */ 4 + #include <linux/irqdesc.h> 4 5 5 6 extern int noirqdebug; 7 + 8 + #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) 6 9 7 10 /* Set default functions for irq_chip structures: */ 8 11 extern void irq_chip_set_defaults(struct irq_chip *chip); ··· 18 15 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); 19 16 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); 20 17 21 - extern struct lock_class_key irq_desc_lock_class; 22 18 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 23 - extern void clear_kstat_irqs(struct irq_desc *desc); 24 - extern raw_spinlock_t sparse_irq_lock; 25 19 26 - #ifdef CONFIG_SPARSE_IRQ 27 - void replace_irq_desc(unsigned int irq, struct irq_desc *desc); 28 - #endif 20 + /* Resending of interrupts: */ 21 + void check_irq_resend(struct irq_desc *desc, unsigned int irq); 29 22 30 23 #ifdef CONFIG_PROC_FS 31 24 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); 25 + extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); 32 26 extern void register_handler_proc(unsigned int irq, struct irqaction *action); 33 27 extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); 34 28 #else 35 29 static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } 30 + static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } 36 31 static inline void register_handler_proc(unsigned int irq, 37 32 struct irqaction *action) { } 38 33 static inline void unregister_handler_proc(unsigned int irq, ··· 41 40 42 41 extern void irq_set_thread_affinity(struct irq_desc *desc); 43 42 44 - /* Inline functions for support of irq chips on slow busses */ 45 - static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc) 43 + #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 44 + static inline void irq_end(unsigned int irq, struct irq_desc *desc) 46 45 { 47 - if (unlikely(desc->chip->bus_lock)) 48 - desc->chip->bus_lock(irq); 46 + if (desc->irq_data.chip && desc->irq_data.chip->end) 47 + desc->irq_data.chip->end(irq); 48 + } 49 + #else 50 + static inline void irq_end(unsigned int irq, struct irq_desc *desc) { } 51 + #endif 52 + 53 + /* Inline functions for support of irq chips on slow busses */ 54 + static inline void chip_bus_lock(struct irq_desc *desc) 55 + { 56 + if (unlikely(desc->irq_data.chip->irq_bus_lock)) 57 + desc->irq_data.chip->irq_bus_lock(&desc->irq_data); 49 58 } 50 59 51 - static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc) 60 + static inline void chip_bus_sync_unlock(struct irq_desc *desc) 52 61 { 53 - if (unlikely(desc->chip->bus_sync_unlock)) 54 - desc->chip->bus_sync_unlock(irq); 62 + if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) 63 + desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); 55 64 } 56 65 57 66 /* ··· 78 67 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); 79 68 printk("->handle_irq(): %p, ", desc->handle_irq); 80 69 print_symbol("%s\n", (unsigned long)desc->handle_irq); 81 - printk("->chip(): %p, ", desc->chip); 82 - print_symbol("%s\n", (unsigned long)desc->chip); 70 + printk("->irq_data.chip(): %p, ", desc->irq_data.chip); 71 + print_symbol("%s\n", (unsigned long)desc->irq_data.chip); 83 72 printk("->action(): %p\n", desc->action); 84 73 if (desc->action) { 85 74 printk("->action->handler(): %p, ", desc->action->handler);
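chip_bus_lock()/chip_bus_sync_unlock() now take only the descriptor and invoke the irq_data variants of the bus hooks. These hooks exist for chips behind slow busses (e.g. I2C or SPI expanders): the chip callbacks cache register updates under desc->lock, and the sleepable unlock hook flushes them to the device. A sketch under assumed names (the bar_* driver, its lock and the BAR_REG_MASK register are invented):

static void bar_irq_bus_lock(struct irq_data *data)
{
	struct bar_chip *bar = data->chip_data;

	mutex_lock(&bar->lock);
}

static void bar_irq_bus_sync_unlock(struct irq_data *data)
{
	struct bar_chip *bar = data->chip_data;

	/* Flush the mask bits cached by the (not shown) mask/unmask callbacks */
	i2c_smbus_write_byte_data(bar->client, BAR_REG_MASK, bar->mask_cache);
	mutex_unlock(&bar->lock);
}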
+395
kernel/irq/irqdesc.c
··· 1 + /* 2 + * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 3 + * Copyright (C) 2005-2006, Thomas Gleixner, Russell King 4 + * 5 + * This file contains the interrupt descriptor management code 6 + * 7 + * Detailed information is available in Documentation/DocBook/genericirq 8 + * 9 + */ 10 + #include <linux/irq.h> 11 + #include <linux/slab.h> 12 + #include <linux/module.h> 13 + #include <linux/interrupt.h> 14 + #include <linux/kernel_stat.h> 15 + #include <linux/radix-tree.h> 16 + #include <linux/bitmap.h> 17 + 18 + #include "internals.h" 19 + 20 + /* 21 + * lockdep: we want to handle all irq_desc locks as a single lock-class: 22 + */ 23 + static struct lock_class_key irq_desc_lock_class; 24 + 25 + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 26 + static void __init init_irq_default_affinity(void) 27 + { 28 + alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 29 + cpumask_setall(irq_default_affinity); 30 + } 31 + #else 32 + static void __init init_irq_default_affinity(void) 33 + { 34 + } 35 + #endif 36 + 37 + #ifdef CONFIG_SMP 38 + static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) 39 + { 40 + if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) 41 + return -ENOMEM; 42 + 43 + #ifdef CONFIG_GENERIC_PENDING_IRQ 44 + if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { 45 + free_cpumask_var(desc->irq_data.affinity); 46 + return -ENOMEM; 47 + } 48 + #endif 49 + return 0; 50 + } 51 + 52 + static void desc_smp_init(struct irq_desc *desc, int node) 53 + { 54 + desc->irq_data.node = node; 55 + cpumask_copy(desc->irq_data.affinity, irq_default_affinity); 56 + #ifdef CONFIG_GENERIC_PENDING_IRQ 57 + cpumask_clear(desc->pending_mask); 58 + #endif 59 + } 60 + 61 + static inline int desc_node(struct irq_desc *desc) 62 + { 63 + return desc->irq_data.node; 64 + } 65 + 66 + #else 67 + static inline int 68 + alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } 69 + static inline void desc_smp_init(struct irq_desc *desc, int node) { } 70 + static inline int desc_node(struct irq_desc *desc) { return 0; } 71 + #endif 72 + 73 + static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) 74 + { 75 + desc->irq_data.irq = irq; 76 + desc->irq_data.chip = &no_irq_chip; 77 + desc->irq_data.chip_data = NULL; 78 + desc->irq_data.handler_data = NULL; 79 + desc->irq_data.msi_desc = NULL; 80 + desc->status = IRQ_DEFAULT_INIT_FLAGS; 81 + desc->handle_irq = handle_bad_irq; 82 + desc->depth = 1; 83 + desc->irq_count = 0; 84 + desc->irqs_unhandled = 0; 85 + desc->name = NULL; 86 + memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs))); 87 + desc_smp_init(desc, node); 88 + } 89 + 90 + int nr_irqs = NR_IRQS; 91 + EXPORT_SYMBOL_GPL(nr_irqs); 92 + 93 + static DEFINE_MUTEX(sparse_irq_lock); 94 + static DECLARE_BITMAP(allocated_irqs, NR_IRQS); 95 + 96 + #ifdef CONFIG_SPARSE_IRQ 97 + 98 + static RADIX_TREE(irq_desc_tree, GFP_KERNEL); 99 + 100 + static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) 101 + { 102 + radix_tree_insert(&irq_desc_tree, irq, desc); 103 + } 104 + 105 + struct irq_desc *irq_to_desc(unsigned int irq) 106 + { 107 + return radix_tree_lookup(&irq_desc_tree, irq); 108 + } 109 + 110 + static void delete_irq_desc(unsigned int irq) 111 + { 112 + radix_tree_delete(&irq_desc_tree, irq); 113 + } 114 + 115 + #ifdef CONFIG_SMP 116 + static void free_masks(struct irq_desc *desc) 117 + { 118 + #ifdef CONFIG_GENERIC_PENDING_IRQ 119 + free_cpumask_var(desc->pending_mask); 120 + #endif 
121 + free_cpumask_var(desc->irq_data.affinity); 122 + } 123 + #else 124 + static inline void free_masks(struct irq_desc *desc) { } 125 + #endif 126 + 127 + static struct irq_desc *alloc_desc(int irq, int node) 128 + { 129 + struct irq_desc *desc; 130 + gfp_t gfp = GFP_KERNEL; 131 + 132 + desc = kzalloc_node(sizeof(*desc), gfp, node); 133 + if (!desc) 134 + return NULL; 135 + /* allocate based on nr_cpu_ids */ 136 + desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs), 137 + gfp, node); 138 + if (!desc->kstat_irqs) 139 + goto err_desc; 140 + 141 + if (alloc_masks(desc, gfp, node)) 142 + goto err_kstat; 143 + 144 + raw_spin_lock_init(&desc->lock); 145 + lockdep_set_class(&desc->lock, &irq_desc_lock_class); 146 + 147 + desc_set_defaults(irq, desc, node); 148 + 149 + return desc; 150 + 151 + err_kstat: 152 + kfree(desc->kstat_irqs); 153 + err_desc: 154 + kfree(desc); 155 + return NULL; 156 + } 157 + 158 + static void free_desc(unsigned int irq) 159 + { 160 + struct irq_desc *desc = irq_to_desc(irq); 161 + 162 + unregister_irq_proc(irq, desc); 163 + 164 + mutex_lock(&sparse_irq_lock); 165 + delete_irq_desc(irq); 166 + mutex_unlock(&sparse_irq_lock); 167 + 168 + free_masks(desc); 169 + kfree(desc->kstat_irqs); 170 + kfree(desc); 171 + } 172 + 173 + static int alloc_descs(unsigned int start, unsigned int cnt, int node) 174 + { 175 + struct irq_desc *desc; 176 + int i; 177 + 178 + for (i = 0; i < cnt; i++) { 179 + desc = alloc_desc(start + i, node); 180 + if (!desc) 181 + goto err; 182 + mutex_lock(&sparse_irq_lock); 183 + irq_insert_desc(start + i, desc); 184 + mutex_unlock(&sparse_irq_lock); 185 + } 186 + return start; 187 + 188 + err: 189 + for (i--; i >= 0; i--) 190 + free_desc(start + i); 191 + 192 + mutex_lock(&sparse_irq_lock); 193 + bitmap_clear(allocated_irqs, start, cnt); 194 + mutex_unlock(&sparse_irq_lock); 195 + return -ENOMEM; 196 + } 197 + 198 + struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) 199 + { 200 + int res = irq_alloc_descs(irq, irq, 1, node); 201 + 202 + if (res == -EEXIST || res == irq) 203 + return irq_to_desc(irq); 204 + return NULL; 205 + } 206 + 207 + int __init early_irq_init(void) 208 + { 209 + int i, initcnt, node = first_online_node; 210 + struct irq_desc *desc; 211 + 212 + init_irq_default_affinity(); 213 + 214 + /* Let arch update nr_irqs and return the nr of preallocated irqs */ 215 + initcnt = arch_probe_nr_irqs(); 216 + printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); 217 + 218 + for (i = 0; i < initcnt; i++) { 219 + desc = alloc_desc(i, node); 220 + set_bit(i, allocated_irqs); 221 + irq_insert_desc(i, desc); 222 + } 223 + return arch_early_irq_init(); 224 + } 225 + 226 + #else /* !CONFIG_SPARSE_IRQ */ 227 + 228 + struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 229 + [0 ... 
NR_IRQS-1] = { 230 + .status = IRQ_DEFAULT_INIT_FLAGS, 231 + .handle_irq = handle_bad_irq, 232 + .depth = 1, 233 + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), 234 + } 235 + }; 236 + 237 + static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; 238 + int __init early_irq_init(void) 239 + { 240 + int count, i, node = first_online_node; 241 + struct irq_desc *desc; 242 + 243 + init_irq_default_affinity(); 244 + 245 + printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); 246 + 247 + desc = irq_desc; 248 + count = ARRAY_SIZE(irq_desc); 249 + 250 + for (i = 0; i < count; i++) { 251 + desc[i].irq_data.irq = i; 252 + desc[i].irq_data.chip = &no_irq_chip; 253 + desc[i].kstat_irqs = kstat_irqs_all[i]; 254 + alloc_masks(desc + i, GFP_KERNEL, node); 255 + desc_smp_init(desc + i, node); 256 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 257 + } 258 + return arch_early_irq_init(); 259 + } 260 + 261 + struct irq_desc *irq_to_desc(unsigned int irq) 262 + { 263 + return (irq < NR_IRQS) ? irq_desc + irq : NULL; 264 + } 265 + 266 + struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) 267 + { 268 + return irq_to_desc(irq); 269 + } 270 + 271 + static void free_desc(unsigned int irq) 272 + { 273 + dynamic_irq_cleanup(irq); 274 + } 275 + 276 + static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) 277 + { 278 + return start; 279 + } 280 + #endif /* !CONFIG_SPARSE_IRQ */ 281 + 282 + /* Dynamic interrupt handling */ 283 + 284 + /** 285 + * irq_free_descs - free irq descriptors 286 + * @from: Start of descriptor range 287 + * @cnt: Number of consecutive irqs to free 288 + */ 289 + void irq_free_descs(unsigned int from, unsigned int cnt) 290 + { 291 + int i; 292 + 293 + if (from >= nr_irqs || (from + cnt) > nr_irqs) 294 + return; 295 + 296 + for (i = 0; i < cnt; i++) 297 + free_desc(from + i); 298 + 299 + mutex_lock(&sparse_irq_lock); 300 + bitmap_clear(allocated_irqs, from, cnt); 301 + mutex_unlock(&sparse_irq_lock); 302 + } 303 + 304 + /** 305 + * irq_alloc_descs - allocate and initialize a range of irq descriptors 306 + * @irq: Allocate for specific irq number if irq >= 0 307 + * @from: Start the search from this irq number 308 + * @cnt: Number of consecutive irqs to allocate. 
309 + * @node: Preferred node on which the irq descriptor should be allocated 310 + * 311 + * Returns the first irq number or error code 312 + */ 313 + int __ref 314 + irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) 315 + { 316 + int start, ret; 317 + 318 + if (!cnt) 319 + return -EINVAL; 320 + 321 + mutex_lock(&sparse_irq_lock); 322 + 323 + start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); 324 + ret = -EEXIST; 325 + if (irq >=0 && start != irq) 326 + goto err; 327 + 328 + ret = -ENOMEM; 329 + if (start >= nr_irqs) 330 + goto err; 331 + 332 + bitmap_set(allocated_irqs, start, cnt); 333 + mutex_unlock(&sparse_irq_lock); 334 + return alloc_descs(start, cnt, node); 335 + 336 + err: 337 + mutex_unlock(&sparse_irq_lock); 338 + return ret; 339 + } 340 + 341 + /** 342 + * irq_reserve_irqs - mark irqs allocated 343 + * @from: mark from irq number 344 + * @cnt: number of irqs to mark 345 + * 346 + * Returns 0 on success or an appropriate error code 347 + */ 348 + int irq_reserve_irqs(unsigned int from, unsigned int cnt) 349 + { 350 + unsigned int start; 351 + int ret = 0; 352 + 353 + if (!cnt || (from + cnt) > nr_irqs) 354 + return -EINVAL; 355 + 356 + mutex_lock(&sparse_irq_lock); 357 + start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); 358 + if (start == from) 359 + bitmap_set(allocated_irqs, start, cnt); 360 + else 361 + ret = -EEXIST; 362 + mutex_unlock(&sparse_irq_lock); 363 + return ret; 364 + } 365 + 366 + /** 367 + * irq_get_next_irq - get next allocated irq number 368 + * @offset: where to start the search 369 + * 370 + * Returns next irq number after offset or nr_irqs if none is found. 371 + */ 372 + unsigned int irq_get_next_irq(unsigned int offset) 373 + { 374 + return find_next_bit(allocated_irqs, nr_irqs, offset); 375 + } 376 + 377 + /** 378 + * dynamic_irq_cleanup - cleanup a dynamically allocated irq 379 + * @irq: irq number to initialize 380 + */ 381 + void dynamic_irq_cleanup(unsigned int irq) 382 + { 383 + struct irq_desc *desc = irq_to_desc(irq); 384 + unsigned long flags; 385 + 386 + raw_spin_lock_irqsave(&desc->lock, flags); 387 + desc_set_defaults(irq, desc, desc_node(desc)); 388 + raw_spin_unlock_irqrestore(&desc->lock, flags); 389 + } 390 + 391 + unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 392 + { 393 + struct irq_desc *desc = irq_to_desc(irq); 394 + return desc ? desc->kstat_irqs[cpu] : 0; 395 + }
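irq_alloc_descs()/irq_free_descs() are the new front end for dynamic interrupt ranges, backed by the allocated_irqs bitmap and the sparse_irq mutex. A hedged driver-side usage sketch (error handling trimmed):

	int virq;

	/* Allocate four consecutive irq numbers, searching upward from 64 */
	virq = irq_alloc_descs(-1, 64, 4, numa_node_id());
	if (virq < 0)
		return virq;

	/* ... use virq .. virq + 3, then release them on teardown ... */
	irq_free_descs(virq, 4);

Passing irq >= 0 instead of -1 requests that exact number and fails with -EEXIST if it is already taken.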
+44 -43
kernel/irq/manage.c
··· 73 73 { 74 74 struct irq_desc *desc = irq_to_desc(irq); 75 75 76 - if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || 77 - !desc->chip->set_affinity) 76 + if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || 77 + !desc->irq_data.chip->irq_set_affinity) 78 78 return 0; 79 79 80 80 return 1; ··· 109 109 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 110 110 { 111 111 struct irq_desc *desc = irq_to_desc(irq); 112 + struct irq_chip *chip = desc->irq_data.chip; 112 113 unsigned long flags; 113 114 114 - if (!desc->chip->set_affinity) 115 + if (!chip->irq_set_affinity) 115 116 return -EINVAL; 116 117 117 118 raw_spin_lock_irqsave(&desc->lock, flags); 118 119 119 120 #ifdef CONFIG_GENERIC_PENDING_IRQ 120 121 if (desc->status & IRQ_MOVE_PCNTXT) { 121 - if (!desc->chip->set_affinity(irq, cpumask)) { 122 - cpumask_copy(desc->affinity, cpumask); 122 + if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { 123 + cpumask_copy(desc->irq_data.affinity, cpumask); 123 124 irq_set_thread_affinity(desc); 124 125 } 125 126 } ··· 129 128 cpumask_copy(desc->pending_mask, cpumask); 130 129 } 131 130 #else 132 - if (!desc->chip->set_affinity(irq, cpumask)) { 133 - cpumask_copy(desc->affinity, cpumask); 131 + if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { 132 + cpumask_copy(desc->irq_data.affinity, cpumask); 134 133 irq_set_thread_affinity(desc); 135 134 } 136 135 #endif ··· 169 168 * one of the targets is online. 170 169 */ 171 170 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 172 - if (cpumask_any_and(desc->affinity, cpu_online_mask) 171 + if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) 173 172 < nr_cpu_ids) 174 173 goto set_affinity; 175 174 else 176 175 desc->status &= ~IRQ_AFFINITY_SET; 177 176 } 178 177 179 - cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); 178 + cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); 180 179 set_affinity: 181 - desc->chip->set_affinity(irq, desc->affinity); 180 + desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false); 182 181 183 182 return 0; 184 183 } ··· 224 223 225 224 if (!desc->depth++) { 226 225 desc->status |= IRQ_DISABLED; 227 - desc->chip->disable(irq); 226 + desc->irq_data.chip->irq_disable(&desc->irq_data); 228 227 } 229 228 } 230 229 ··· 247 246 if (!desc) 248 247 return; 249 248 250 - chip_bus_lock(irq, desc); 249 + chip_bus_lock(desc); 251 250 raw_spin_lock_irqsave(&desc->lock, flags); 252 251 __disable_irq(desc, irq, false); 253 252 raw_spin_unlock_irqrestore(&desc->lock, flags); 254 - chip_bus_sync_unlock(irq, desc); 253 + chip_bus_sync_unlock(desc); 255 254 } 256 255 EXPORT_SYMBOL(disable_irq_nosync); 257 256 ··· 314 313 * IRQ line is re-enabled. 315 314 * 316 315 * This function may be called from IRQ context only when 317 - * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 316 + * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 
318 317 */ 319 318 void enable_irq(unsigned int irq) 320 319 { ··· 324 323 if (!desc) 325 324 return; 326 325 327 - chip_bus_lock(irq, desc); 326 + chip_bus_lock(desc); 328 327 raw_spin_lock_irqsave(&desc->lock, flags); 329 328 __enable_irq(desc, irq, false); 330 329 raw_spin_unlock_irqrestore(&desc->lock, flags); 331 - chip_bus_sync_unlock(irq, desc); 330 + chip_bus_sync_unlock(desc); 332 331 } 333 332 EXPORT_SYMBOL(enable_irq); 334 333 ··· 337 336 struct irq_desc *desc = irq_to_desc(irq); 338 337 int ret = -ENXIO; 339 338 340 - if (desc->chip->set_wake) 341 - ret = desc->chip->set_wake(irq, on); 339 + if (desc->irq_data.chip->irq_set_wake) 340 + ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); 342 341 343 342 return ret; 344 343 } ··· 430 429 } 431 430 432 431 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 433 - unsigned long flags) 432 + unsigned long flags) 434 433 { 435 434 int ret; 436 - struct irq_chip *chip = desc->chip; 435 + struct irq_chip *chip = desc->irq_data.chip; 437 436 438 - if (!chip || !chip->set_type) { 437 + if (!chip || !chip->irq_set_type) { 439 438 /* 440 439 * IRQF_TRIGGER_* but the PIC does not support multiple 441 440 * flow-types? ··· 446 445 } 447 446 448 447 /* caller masked out all except trigger mode flags */ 449 - ret = chip->set_type(irq, flags); 448 + ret = chip->irq_set_type(&desc->irq_data, flags); 450 449 451 450 if (ret) 452 - pr_err("setting trigger mode %d for irq %u failed (%pF)\n", 453 - (int)flags, irq, chip->set_type); 451 + pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", 452 + flags, irq, chip->irq_set_type); 454 453 else { 455 454 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 456 455 flags |= IRQ_LEVEL; ··· 458 457 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); 459 458 desc->status |= flags; 460 459 461 - if (chip != desc->chip) 462 - irq_chip_set_defaults(desc->chip); 460 + if (chip != desc->irq_data.chip) 461 + irq_chip_set_defaults(desc->irq_data.chip); 463 462 } 464 463 465 464 return ret; ··· 508 507 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) 509 508 { 510 509 again: 511 - chip_bus_lock(irq, desc); 510 + chip_bus_lock(desc); 512 511 raw_spin_lock_irq(&desc->lock); 513 512 514 513 /* ··· 522 521 */ 523 522 if (unlikely(desc->status & IRQ_INPROGRESS)) { 524 523 raw_spin_unlock_irq(&desc->lock); 525 - chip_bus_sync_unlock(irq, desc); 524 + chip_bus_sync_unlock(desc); 526 525 cpu_relax(); 527 526 goto again; 528 527 } 529 528 530 529 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { 531 530 desc->status &= ~IRQ_MASKED; 532 - desc->chip->unmask(irq); 531 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 533 532 } 534 533 raw_spin_unlock_irq(&desc->lock); 535 - chip_bus_sync_unlock(irq, desc); 534 + chip_bus_sync_unlock(desc); 536 535 } 537 536 538 537 #ifdef CONFIG_SMP ··· 557 556 } 558 557 559 558 raw_spin_lock_irq(&desc->lock); 560 - cpumask_copy(mask, desc->affinity); 559 + cpumask_copy(mask, desc->irq_data.affinity); 561 560 raw_spin_unlock_irq(&desc->lock); 562 561 563 562 set_cpus_allowed_ptr(current, mask); ··· 658 657 if (!desc) 659 658 return -EINVAL; 660 659 661 - if (desc->chip == &no_irq_chip) 660 + if (desc->irq_data.chip == &no_irq_chip) 662 661 return -ENOSYS; 663 662 /* 664 663 * Some drivers like serial.c use request_irq() heavily, ··· 753 752 } 754 753 755 754 if (!shared) { 756 - irq_chip_set_defaults(desc->chip); 755 + irq_chip_set_defaults(desc->irq_data.chip); 757 756 758 757 
init_waitqueue_head(&desc->wait_for_threads); 759 758 ··· 780 779 if (!(desc->status & IRQ_NOAUTOEN)) { 781 780 desc->depth = 0; 782 781 desc->status &= ~IRQ_DISABLED; 783 - desc->chip->startup(irq); 782 + desc->irq_data.chip->irq_startup(&desc->irq_data); 784 783 } else 785 784 /* Undo nested disables: */ 786 785 desc->depth = 1; ··· 913 912 914 913 /* Currently used only by UML, might disappear one day: */ 915 914 #ifdef CONFIG_IRQ_RELEASE_METHOD 916 - if (desc->chip->release) 917 - desc->chip->release(irq, dev_id); 915 + if (desc->irq_data.chip->release) 916 + desc->irq_data.chip->release(irq, dev_id); 918 917 #endif 919 918 920 919 /* If this was the last handler, shut down the IRQ line: */ 921 920 if (!desc->action) { 922 921 desc->status |= IRQ_DISABLED; 923 - if (desc->chip->shutdown) 924 - desc->chip->shutdown(irq); 922 + if (desc->irq_data.chip->irq_shutdown) 923 + desc->irq_data.chip->irq_shutdown(&desc->irq_data); 925 924 else 926 - desc->chip->disable(irq); 925 + desc->irq_data.chip->irq_disable(&desc->irq_data); 927 926 } 928 927 929 928 #ifdef CONFIG_SMP ··· 998 997 if (!desc) 999 998 return; 1000 999 1001 - chip_bus_lock(irq, desc); 1000 + chip_bus_lock(desc); 1002 1001 kfree(__free_irq(irq, dev_id)); 1003 - chip_bus_sync_unlock(irq, desc); 1002 + chip_bus_sync_unlock(desc); 1004 1003 } 1005 1004 EXPORT_SYMBOL(free_irq); 1006 1005 ··· 1087 1086 action->name = devname; 1088 1087 action->dev_id = dev_id; 1089 1088 1090 - chip_bus_lock(irq, desc); 1089 + chip_bus_lock(desc); 1091 1090 retval = __setup_irq(irq, desc, action); 1092 - chip_bus_sync_unlock(irq, desc); 1091 + chip_bus_sync_unlock(desc); 1093 1092 1094 1093 if (retval) 1095 1094 kfree(action);
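Note that while manage.c now routes everything through desc->irq_data and the irq_* chip methods, the externally visible API is unchanged; a driver pinning its interrupt still does something like (illustrative only):

	if (irq_can_set_affinity(irq))
		irq_set_affinity(irq, cpumask_of(2));	/* pin to CPU 2 */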
+7 -5
kernel/irq/migration.c
··· 7 7 void move_masked_irq(int irq) 8 8 { 9 9 struct irq_desc *desc = irq_to_desc(irq); 10 + struct irq_chip *chip = desc->irq_data.chip; 10 11 11 12 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 12 13 return; ··· 25 24 if (unlikely(cpumask_empty(desc->pending_mask))) 26 25 return; 27 26 28 - if (!desc->chip->set_affinity) 27 + if (!chip->irq_set_affinity) 29 28 return; 30 29 31 30 assert_raw_spin_locked(&desc->lock); ··· 44 43 */ 45 44 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) 46 45 < nr_cpu_ids)) 47 - if (!desc->chip->set_affinity(irq, desc->pending_mask)) { 48 - cpumask_copy(desc->affinity, desc->pending_mask); 46 + if (!chip->irq_set_affinity(&desc->irq_data, 47 + desc->pending_mask, false)) { 48 + cpumask_copy(desc->irq_data.affinity, desc->pending_mask); 49 49 irq_set_thread_affinity(desc); 50 50 } 51 51 ··· 63 61 if (unlikely(desc->status & IRQ_DISABLED)) 64 62 return; 65 63 66 - desc->chip->mask(irq); 64 + desc->irq_data.chip->irq_mask(&desc->irq_data); 67 65 move_masked_irq(irq); 68 - desc->chip->unmask(irq); 66 + desc->irq_data.chip->irq_unmask(&desc->irq_data); 69 67 } 70 68
-120
kernel/irq/numa_migrate.c
··· 1 - /* 2 - * NUMA irq-desc migration code 3 - * 4 - * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to 5 - * the new "home node" of the IRQ. 6 - */ 7 - 8 - #include <linux/irq.h> 9 - #include <linux/slab.h> 10 - #include <linux/module.h> 11 - #include <linux/random.h> 12 - #include <linux/interrupt.h> 13 - #include <linux/kernel_stat.h> 14 - 15 - #include "internals.h" 16 - 17 - static void init_copy_kstat_irqs(struct irq_desc *old_desc, 18 - struct irq_desc *desc, 19 - int node, int nr) 20 - { 21 - init_kstat_irqs(desc, node, nr); 22 - 23 - if (desc->kstat_irqs != old_desc->kstat_irqs) 24 - memcpy(desc->kstat_irqs, old_desc->kstat_irqs, 25 - nr * sizeof(*desc->kstat_irqs)); 26 - } 27 - 28 - static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) 29 - { 30 - if (old_desc->kstat_irqs == desc->kstat_irqs) 31 - return; 32 - 33 - kfree(old_desc->kstat_irqs); 34 - old_desc->kstat_irqs = NULL; 35 - } 36 - 37 - static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, 38 - struct irq_desc *desc, int node) 39 - { 40 - memcpy(desc, old_desc, sizeof(struct irq_desc)); 41 - if (!alloc_desc_masks(desc, node, false)) { 42 - printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " 43 - "for migration.\n", irq); 44 - return false; 45 - } 46 - raw_spin_lock_init(&desc->lock); 47 - desc->node = node; 48 - lockdep_set_class(&desc->lock, &irq_desc_lock_class); 49 - init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); 50 - init_copy_desc_masks(old_desc, desc); 51 - arch_init_copy_chip_data(old_desc, desc, node); 52 - return true; 53 - } 54 - 55 - static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) 56 - { 57 - free_kstat_irqs(old_desc, desc); 58 - free_desc_masks(old_desc, desc); 59 - arch_free_chip_data(old_desc, desc); 60 - } 61 - 62 - static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, 63 - int node) 64 - { 65 - struct irq_desc *desc; 66 - unsigned int irq; 67 - unsigned long flags; 68 - 69 - irq = old_desc->irq; 70 - 71 - raw_spin_lock_irqsave(&sparse_irq_lock, flags); 72 - 73 - /* We have to check it to avoid races with another CPU */ 74 - desc = irq_to_desc(irq); 75 - 76 - if (desc && old_desc != desc) 77 - goto out_unlock; 78 - 79 - desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); 80 - if (!desc) { 81 - printk(KERN_ERR "irq %d: can not get new irq_desc " 82 - "for migration.\n", irq); 83 - /* still use old one */ 84 - desc = old_desc; 85 - goto out_unlock; 86 - } 87 - if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { 88 - /* still use old one */ 89 - kfree(desc); 90 - desc = old_desc; 91 - goto out_unlock; 92 - } 93 - 94 - replace_irq_desc(irq, desc); 95 - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 96 - 97 - /* free the old one */ 98 - free_one_irq_desc(old_desc, desc); 99 - kfree(old_desc); 100 - 101 - return desc; 102 - 103 - out_unlock: 104 - raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 105 - 106 - return desc; 107 - } 108 - 109 - struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) 110 - { 111 - /* those static or target node is -1, do not move them */ 112 - if (desc->irq < NR_IRQS_LEGACY || node == -1) 113 - return desc; 114 - 115 - if (desc->node != node) 116 - desc = __real_move_irq_desc(desc, node); 117 - 118 - return desc; 119 - } 120 -
+22 -4
kernel/irq/proc.c
··· 21 21 static int irq_affinity_proc_show(struct seq_file *m, void *v) 22 22 { 23 23 struct irq_desc *desc = irq_to_desc((long)m->private); 24 - const struct cpumask *mask = desc->affinity; 24 + const struct cpumask *mask = desc->irq_data.affinity; 25 25 26 26 #ifdef CONFIG_GENERIC_PENDING_IRQ 27 27 if (desc->status & IRQ_MOVE_PENDING) ··· 65 65 cpumask_var_t new_value; 66 66 int err; 67 67 68 - if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || 68 + if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity || 69 69 irq_balancing_disabled(irq)) 70 70 return -EIO; 71 71 ··· 185 185 { 186 186 struct irq_desc *desc = irq_to_desc((long) m->private); 187 187 188 - seq_printf(m, "%d\n", desc->node); 188 + seq_printf(m, "%d\n", desc->irq_data.node); 189 189 return 0; 190 190 } 191 191 ··· 269 269 { 270 270 char name [MAX_NAMELEN]; 271 271 272 - if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) 272 + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) 273 273 return; 274 274 275 275 memset(name, 0, MAX_NAMELEN); ··· 295 295 296 296 proc_create_data("spurious", 0444, desc->dir, 297 297 &irq_spurious_proc_fops, (void *)(long)irq); 298 + } 299 + 300 + void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) 301 + { 302 + char name [MAX_NAMELEN]; 303 + 304 + if (!root_irq_dir || !desc->dir) 305 + return; 306 + #ifdef CONFIG_SMP 307 + remove_proc_entry("smp_affinity", desc->dir); 308 + remove_proc_entry("affinity_hint", desc->dir); 309 + remove_proc_entry("node", desc->dir); 310 + #endif 311 + remove_proc_entry("spurious", desc->dir); 312 + 313 + memset(name, 0, MAX_NAMELEN); 314 + sprintf(name, "%u", irq); 315 + remove_proc_entry(name, root_irq_dir); 298 316 } 299 317 300 318 #undef MAX_NAMELEN
+3 -2
kernel/irq/resend.c
··· 60 60 /* 61 61 * Make sure the interrupt is enabled, before resending it: 62 62 */ 63 - desc->chip->enable(irq); 63 + desc->irq_data.chip->irq_enable(&desc->irq_data); 64 64 65 65 /* 66 66 * We do not resend level type interrupts. Level type ··· 70 70 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { 71 71 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; 72 72 73 - if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { 73 + if (!desc->irq_data.chip->irq_retrigger || 74 + !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { 74 75 #ifdef CONFIG_HARDIRQS_SW_RESEND 75 76 /* Set it pending and activate the softirq: */ 76 77 set_bit(irq, irqs_resend);
+5 -3
kernel/irq/spurious.c
··· 14 14 #include <linux/moduleparam.h> 15 15 #include <linux/timer.h> 16 16 17 + #include "internals.h" 18 + 17 19 static int irqfixup __read_mostly; 18 20 19 21 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) ··· 80 78 * If we did actual work for the real IRQ line we must let the 81 79 * IRQ controller clean up too 82 80 */ 83 - if (work && desc->chip && desc->chip->end) 84 - desc->chip->end(irq); 81 + if (work) 82 + irq_end(irq, desc); 85 83 raw_spin_unlock(&desc->lock); 86 84 87 85 return ok; ··· 256 254 printk(KERN_EMERG "Disabling IRQ #%d\n", irq); 257 255 desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; 258 256 desc->depth++; 259 - desc->chip->disable(irq); 257 + desc->irq_data.chip->irq_disable(&desc->irq_data); 260 258 261 259 mod_timer(&poll_spurious_irq_timer, 262 260 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+3 -6
kernel/softirq.c
··· 910 910 return 0; 911 911 } 912 912 913 + #ifdef CONFIG_GENERIC_HARDIRQS 913 914 int __init __weak arch_probe_nr_irqs(void) 914 915 { 915 - return 0; 916 + return NR_IRQS_LEGACY; 916 917 } 917 918 918 919 int __init __weak arch_early_irq_init(void) 919 920 { 920 921 return 0; 921 922 } 922 - 923 - int __weak arch_init_chip_data(struct irq_desc *desc, int node) 924 - { 925 - return 0; 926 - } 923 + #endif
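The weak arch_probe_nr_irqs() now returns the number of preallocated legacy descriptors instead of 0, which early_irq_init() in irqdesc.c uses to decide how many descriptors to set up front. An architecture overriding it would follow the new contract roughly like this (sketch, not from this commit):

int __init arch_probe_nr_irqs(void)
{
	/* An architecture may also adjust nr_irqs here, e.g. from nr_cpu_ids */
	nr_irqs = NR_IRQS;

	/* Tell the core how many legacy descriptors to preallocate */
	return NR_IRQS_LEGACY;
}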