Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/irq: Store irq descriptor in vector array

We can spare the irq_desc lookup in the interrupt entry code if we
store the descriptor pointer in the vector array instead of the interrupt
number.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Link: http://lkml.kernel.org/r/20150802203609.717724106@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

+58 -66
+3 -3
arch/x86/include/asm/hw_irq.h
··· 182 182 #define trace_irq_entries_start irq_entries_start 183 183 #endif 184 184 185 - #define VECTOR_UNUSED (-1) 186 - #define VECTOR_RETRIGGERED (-2) 185 + #define VECTOR_UNUSED NULL 186 + #define VECTOR_RETRIGGERED ((void *)~0UL) 187 187 188 - typedef int vector_irq_t[NR_VECTORS]; 188 + typedef struct irq_desc* vector_irq_t[NR_VECTORS]; 189 189 DECLARE_PER_CPU(vector_irq_t, vector_irq); 190 190 191 191 #endif /* !__ASSEMBLY__ */
+3 -1
arch/x86/include/asm/irq.h
··· 36 36 37 37 extern void (*x86_platform_ipi_callback)(void); 38 38 extern void native_init_IRQ(void); 39 - extern bool handle_irq(unsigned irq, struct pt_regs *regs); 39 + 40 + struct irq_desc; 41 + extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs); 40 42 41 43 extern __visible unsigned int do_IRQ(struct pt_regs *regs); 42 44
+24 -27
arch/x86/kernel/apic/vector.c
··· 169 169 goto next; 170 170 171 171 for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { 172 - if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNUSED) 172 + if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) 173 173 goto next; 174 174 } 175 175 /* Found one! */ ··· 181 181 cpumask_intersects(d->old_domain, cpu_online_mask); 182 182 } 183 183 for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) 184 - per_cpu(vector_irq, new_cpu)[vector] = irq; 184 + per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); 185 185 d->cfg.vector = vector; 186 186 cpumask_copy(d->domain, vector_cpumask); 187 187 err = 0; ··· 223 223 224 224 static void clear_irq_vector(int irq, struct apic_chip_data *data) 225 225 { 226 - int cpu, vector; 226 + struct irq_desc *desc; 227 227 unsigned long flags; 228 + int cpu, vector; 228 229 229 230 raw_spin_lock_irqsave(&vector_lock, flags); 230 231 BUG_ON(!data->cfg.vector); ··· 242 241 return; 243 242 } 244 243 244 + desc = irq_to_desc(irq); 245 245 for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) { 246 246 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 247 247 vector++) { 248 - if (per_cpu(vector_irq, cpu)[vector] != irq) 248 + if (per_cpu(vector_irq, cpu)[vector] != desc) 249 249 continue; 250 250 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; 251 251 break; ··· 404 402 return arch_early_ioapic_init(); 405 403 } 406 404 405 + /* Initialize vector_irq on a new cpu */ 407 406 static void __setup_vector_irq(int cpu) 408 407 { 409 - /* Initialize vector_irq on a new cpu */ 410 - int irq, vector; 411 408 struct apic_chip_data *data; 409 + struct irq_desc *desc; 410 + int irq, vector; 412 411 413 412 /* Mark the inuse vectors */ 414 - for_each_active_irq(irq) { 415 - data = apic_chip_data(irq_get_irq_data(irq)); 416 - if (!data) 417 - continue; 413 + for_each_irq_desc(irq, desc) { 414 + struct irq_data *idata = irq_desc_get_irq_data(desc); 418 415 419 - if (!cpumask_test_cpu(cpu, data->domain)) 416 + 
data = apic_chip_data(idata); 417 + if (!data || !cpumask_test_cpu(cpu, data->domain)) 420 418 continue; 421 419 vector = data->cfg.vector; 422 - per_cpu(vector_irq, cpu)[vector] = irq; 420 + per_cpu(vector_irq, cpu)[vector] = desc; 423 421 } 424 422 /* Mark the free vectors */ 425 423 for (vector = 0; vector < NR_VECTORS; ++vector) { 426 - irq = per_cpu(vector_irq, cpu)[vector]; 427 - if (irq <= VECTOR_UNUSED) 424 + desc = per_cpu(vector_irq, cpu)[vector]; 425 + if (IS_ERR_OR_NULL(desc)) 428 426 continue; 429 427 430 - data = apic_chip_data(irq_get_irq_data(irq)); 428 + data = apic_chip_data(irq_desc_get_irq_data(desc)); 431 429 if (!cpumask_test_cpu(cpu, data->domain)) 432 430 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; 433 431 } ··· 449 447 * legacy vector to irq mapping: 450 448 */ 451 449 for (irq = 0; irq < nr_legacy_irqs(); irq++) 452 - per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq; 450 + per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq); 453 451 454 452 __setup_vector_irq(cpu); 455 453 } ··· 545 543 546 544 me = smp_processor_id(); 547 545 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 548 - int irq; 549 - unsigned int irr; 550 - struct irq_desc *desc; 551 546 struct apic_chip_data *data; 547 + struct irq_desc *desc; 548 + unsigned int irr; 552 549 553 550 retry: 554 - irq = __this_cpu_read(vector_irq[vector]); 555 - 556 - if (irq <= VECTOR_UNUSED) 557 - continue; 558 - 559 - desc = irq_to_desc(irq); 560 - if (!desc) 551 + desc = __this_cpu_read(vector_irq[vector]); 552 + if (IS_ERR_OR_NULL(desc)) 561 553 continue; 562 554 563 555 if (!raw_spin_trylock(&desc->lock)) { ··· 561 565 goto retry; 562 566 } 563 567 564 - data = apic_chip_data(&desc->irq_data); 568 + data = apic_chip_data(irq_desc_get_irq_data(desc)); 565 569 if (!data) 566 570 goto unlock; 571 + 567 572 /* 568 573 * Check if the irq migration is in progress. If so, we 569 574 * haven't received the cleanup request yet for this irq.
+16 -21
arch/x86/kernel/irq.c
··· 211 211 __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) 212 212 { 213 213 struct pt_regs *old_regs = set_irq_regs(regs); 214 - 214 + struct irq_desc * desc; 215 215 /* high bit used in ret_from_ code */ 216 216 unsigned vector = ~regs->orig_ax; 217 - unsigned irq; 218 217 219 218 entering_irq(); 220 219 221 - irq = __this_cpu_read(vector_irq[vector]); 220 + desc = __this_cpu_read(vector_irq[vector]); 222 221 223 - if (!handle_irq(irq, regs)) { 222 + if (!handle_irq(desc, regs)) { 224 223 ack_APIC_irq(); 225 224 226 - if (irq != VECTOR_RETRIGGERED) { 227 - pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n", 225 + if (desc != VECTOR_RETRIGGERED) { 226 + pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n", 228 227 __func__, smp_processor_id(), 229 - vector, irq); 228 + vector); 230 229 } else { 231 230 __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); 232 231 } ··· 329 330 */ 330 331 int check_irq_vectors_for_cpu_disable(void) 331 332 { 332 - int irq, cpu; 333 333 unsigned int this_cpu, vector, this_count, count; 334 334 struct irq_desc *desc; 335 335 struct irq_data *data; 336 + int cpu; 336 337 337 338 this_cpu = smp_processor_id(); 338 339 cpumask_copy(&online_new, cpu_online_mask); ··· 340 341 341 342 this_count = 0; 342 343 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 343 - irq = __this_cpu_read(vector_irq[vector]); 344 - if (irq < 0) 344 + desc = __this_cpu_read(vector_irq[vector]); 345 + if (IS_ERR_OR_NULL(desc)) 345 346 continue; 346 - desc = irq_to_desc(irq); 347 - if (!desc) 348 - continue; 349 - 350 347 /* 351 348 * Protect against concurrent action removal, affinity 352 349 * changes etc. 
353 350 */ 354 351 raw_spin_lock(&desc->lock); 355 352 data = irq_desc_get_irq_data(desc); 356 - cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data)); 353 + cpumask_copy(&affinity_new, 354 + irq_data_get_affinity_mask(data)); 357 355 cpumask_clear_cpu(this_cpu, &affinity_new); 358 356 359 357 /* Do not count inactive or per-cpu irqs. */ 360 - if (!irq_has_action(irq) || irqd_is_per_cpu(data)) { 358 + if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) { 361 359 raw_spin_unlock(&desc->lock); 362 360 continue; 363 361 } ··· 395 399 for (vector = FIRST_EXTERNAL_VECTOR; 396 400 vector < first_system_vector; vector++) { 397 401 if (!test_bit(vector, used_vectors) && 398 - per_cpu(vector_irq, cpu)[vector] <= VECTOR_UNUSED) 399 - count++; 402 + IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) 403 + count++; 400 404 } 401 405 } 402 406 ··· 500 504 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 501 505 unsigned int irr; 502 506 503 - if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNUSED) 507 + if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector]))) 504 508 continue; 505 509 506 510 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); 507 511 if (irr & (1 << (vector % 32))) { 508 - irq = __this_cpu_read(vector_irq[vector]); 512 + desc = __this_cpu_read(vector_irq[vector]); 509 513 510 - desc = irq_to_desc(irq); 511 514 raw_spin_lock(&desc->lock); 512 515 data = irq_desc_get_irq_data(desc); 513 516 chip = irq_data_get_irq_chip(data);
+4 -5
arch/x86/kernel/irq_32.c
··· 148 148 call_on_stack(__do_softirq, isp); 149 149 } 150 150 151 - bool handle_irq(unsigned irq, struct pt_regs *regs) 151 + bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) 152 152 { 153 - struct irq_desc *desc; 153 + unsigned int irq = irq_desc_get_irq(desc); 154 154 int overflow; 155 155 156 156 overflow = check_stack_overflow(); 157 157 158 - desc = irq_to_desc(irq); 159 - if (unlikely(!desc)) 158 + if (IS_ERR_OR_NULL(desc)) 160 159 return false; 161 160 162 161 if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) { 163 162 if (unlikely(overflow)) 164 163 print_stack_overflow(); 165 - desc->handle_irq(irq, desc); 164 + generic_handle_irq_desc(irq, desc); 166 165 } 167 166 168 167 return true;
+3 -6
arch/x86/kernel/irq_64.c
··· 68 68 #endif 69 69 } 70 70 71 - bool handle_irq(unsigned irq, struct pt_regs *regs) 71 + bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) 72 72 { 73 - struct irq_desc *desc; 74 - 75 73 stack_overflow_check(regs); 76 74 77 - desc = irq_to_desc(irq); 78 - if (unlikely(!desc)) 75 + if (unlikely(IS_ERR_OR_NULL(desc))) 79 76 return false; 80 77 81 - generic_handle_irq_desc(irq, desc); 78 + generic_handle_irq_desc(irq_desc_get_irq(desc), desc); 82 79 return true; 83 80 }
+2 -2
arch/x86/kernel/irqinit.c
··· 60 60 int cpu; 61 61 62 62 for_each_online_cpu(cpu) { 63 - if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNUSED) 63 + if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) 64 64 return 1; 65 65 } 66 66 ··· 94 94 * irq's migrate etc. 95 95 */ 96 96 for (i = 0; i < nr_legacy_irqs(); i++) 97 - per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = i; 97 + per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i); 98 98 99 99 x86_init.irqs.intr_init(); 100 100 }
+3 -1
arch/x86/lguest/boot.c
··· 843 843 */ 844 844 static int lguest_setup_irq(unsigned int irq) 845 845 { 846 + struct irq_desc *desc; 846 847 int err; 847 848 848 849 /* Returns -ve error or vector number. */ ··· 859 858 handle_level_irq, "level"); 860 859 861 860 /* Some systems map "vectors" to interrupts weirdly. Not us! */ 862 - __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], irq); 861 + desc = irq_to_desc(irq); 862 + __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc); 863 863 return 0; 864 864 } 865 865