Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: use nr_irqs

Also add first_free_entry and pin_map_size, which were NR_IRQS-derived
constants.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Yinghai Lu; committed by Ingo Molnar.
Commit hashes shown on the page: 0799e432, 85c0f909

+44 -38
+14 -12
arch/x86/kernel/io_apic_32.c
··· 70 70 */ 71 71 int sis_apic_bug = -1; 72 72 73 + int first_free_entry = NR_IRQS; 73 74 /* 74 75 * # of IRQ routing registers 75 76 */ ··· 100 99 */ 101 100 #define MAX_PLUS_SHARED_IRQS NR_IRQS 102 101 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) 102 + 103 + int pin_map_size = PIN_MAP_SIZE; 103 104 104 105 /* 105 106 * This is performance-critical, we want to do it O(1) ··· 216 213 */ 217 214 static void add_pin_to_irq(unsigned int irq, int apic, int pin) 218 215 { 219 - static int first_free_entry = NR_IRQS; 220 216 struct irq_pin_list *entry = irq_2_pin + irq; 221 217 222 218 while (entry->next) ··· 224 222 if (entry->pin != -1) { 225 223 entry->next = first_free_entry; 226 224 entry = irq_2_pin + entry->next; 227 - if (++first_free_entry >= PIN_MAP_SIZE) 225 + if (++first_free_entry >= pin_map_size) 228 226 panic("io_apic.c: whoops"); 229 227 } 230 228 entry->apic = apic; ··· 459 457 int i, j; 460 458 461 459 for_each_online_cpu(i) { 462 - for (j = 0; j < NR_IRQS; j++) { 460 + for (j = 0; j < nr_irqs; j++) { 463 461 if (!irq_desc[j].action) 464 462 continue; 465 463 /* Is it a significant load ? */ ··· 494 492 if (!cpu_online(i)) 495 493 continue; 496 494 package_index = CPU_TO_PACKAGEINDEX(i); 497 - for (j = 0; j < NR_IRQS; j++) { 495 + for (j = 0; j < nr_irqs; j++) { 498 496 unsigned long value_now, delta; 499 497 /* Is this an active IRQ or balancing disabled ? */ 500 498 if (!irq_desc[j].action || irq_balancing_disabled(j)) ··· 589 587 */ 590 588 move_this_load = 0; 591 589 selected_irq = -1; 592 - for (j = 0; j < NR_IRQS; j++) { 590 + for (j = 0; j < nr_irqs; j++) { 593 591 /* Is this an active IRQ? */ 594 592 if (!irq_desc[j].action) 595 593 continue; ··· 666 664 long time_remaining = balanced_irq_interval; 667 665 668 666 /* push everything to CPU 0 to give us a starting point. 
*/ 669 - for (i = 0 ; i < NR_IRQS ; i++) { 667 + for (i = 0 ; i < nr_irqs ; i++) { 670 668 irq_desc[i].pending_mask = cpumask_of_cpu(0); 671 669 set_pending_irq(i, cpumask_of_cpu(0)); 672 670 } ··· 714 712 physical_balance = 1; 715 713 716 714 for_each_online_cpu(i) { 717 - irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 718 - irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 715 + irq_cpu_data[i].irq_delta = kzalloc(sizeof(unsigned long) * nr_irqs, GFP_KERNEL); 716 + irq_cpu_data[i].last_irq = kzalloc(sizeof(unsigned long) * nr_irqs, GFP_KERNEL); 719 717 if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { 720 718 printk(KERN_ERR "balanced_irq_init: out of memory"); 721 719 goto failed; ··· 1443 1441 } 1444 1442 } 1445 1443 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1446 - for (i = 0; i < NR_IRQS; i++) { 1444 + for (i = 0; i < nr_irqs; i++) { 1447 1445 struct irq_pin_list *entry = irq_2_pin + i; 1448 1446 if (entry->pin < 0) 1449 1447 continue; ··· 1623 1621 int i, apic; 1624 1622 unsigned long flags; 1625 1623 1626 - for (i = 0; i < PIN_MAP_SIZE; i++) { 1624 + for (i = 0; i < pin_map_size; i++) { 1627 1625 irq_2_pin[i].pin = -1; 1628 1626 irq_2_pin[i].next = 0; 1629 1627 } ··· 2007 2005 * Also, we've got to be careful not to trash gate 2008 2006 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2009 2007 */ 2010 - for (irq = 0; irq < NR_IRQS ; irq++) { 2008 + for (irq = 0; irq < nr_irqs ; irq++) { 2011 2009 if (IO_APIC_IRQ(irq) && !irq_vector[irq]) { 2012 2010 /* 2013 2011 * Hmm.. We don't have an entry for this, ··· 2451 2449 2452 2450 irq = -ENOSPC; 2453 2451 spin_lock_irqsave(&vector_lock, flags); 2454 - for (new = (NR_IRQS - 1); new >= 0; new--) { 2452 + for (new = (nr_irqs - 1); new >= 0; new--) { 2455 2453 if (platform_legacy_irq(new)) 2456 2454 continue; 2457 2455 if (irq_vector[new] != 0)
+17 -16
arch/x86/kernel/io_apic_64.c
··· 132 132 #define MAX_PLUS_SHARED_IRQS NR_IRQS 133 133 #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) 134 134 135 + int pin_map_size = PIN_MAP_SIZE; 135 136 /* 136 137 * This is performance-critical, we want to do it O(1) 137 138 * ··· 225 224 int pin; \ 226 225 struct irq_pin_list *entry = irq_2_pin + irq; \ 227 226 \ 228 - BUG_ON(irq >= NR_IRQS); \ 227 + BUG_ON(irq >= nr_irqs); \ 229 228 for (;;) { \ 230 229 unsigned int reg; \ 231 230 pin = entry->pin; \ ··· 302 301 int apic, pin; 303 302 struct irq_pin_list *entry = irq_2_pin + irq; 304 303 305 - BUG_ON(irq >= NR_IRQS); 304 + BUG_ON(irq >= nr_irqs); 306 305 for (;;) { 307 306 unsigned int reg; 308 307 apic = entry->apic; ··· 359 358 * shared ISA-space IRQs, so we have to support them. We are super 360 359 * fast in the common case, and fast for shared ISA-space IRQs. 361 360 */ 361 + int first_free_entry = NR_IRQS; 362 362 static void add_pin_to_irq(unsigned int irq, int apic, int pin) 363 363 { 364 - static int first_free_entry = NR_IRQS; 365 364 struct irq_pin_list *entry = irq_2_pin + irq; 366 365 367 - BUG_ON(irq >= NR_IRQS); 366 + BUG_ON(irq >= nr_irqs); 368 367 while (entry->next) 369 368 entry = irq_2_pin + entry->next; 370 369 371 370 if (entry->pin != -1) { 372 371 entry->next = first_free_entry; 373 372 entry = irq_2_pin + entry->next; 374 - if (++first_free_entry >= PIN_MAP_SIZE) 373 + if (++first_free_entry >= pin_map_size) 375 374 panic("io_apic.c: ran out of irq_2_pin entries!"); 376 375 } 377 376 entry->apic = apic; ··· 635 634 best_guess = irq; 636 635 } 637 636 } 638 - BUG_ON(best_guess >= NR_IRQS); 637 + BUG_ON(best_guess >= nr_irqs); 639 638 return best_guess; 640 639 } 641 640 ··· 767 766 irq += nr_ioapic_registers[i++]; 768 767 irq += pin; 769 768 } 770 - BUG_ON(irq >= NR_IRQS); 769 + BUG_ON(irq >= nr_irqs); 771 770 return irq; 772 771 } 773 772 ··· 802 801 int cpu; 803 802 struct irq_cfg *cfg; 804 803 805 - BUG_ON((unsigned)irq >= NR_IRQS); 804 + BUG_ON((unsigned)irq >= nr_irqs); 
806 805 cfg = &irq_cfg[irq]; 807 806 808 807 /* Only try and allocate irqs on cpus that are present */ ··· 876 875 cpumask_t mask; 877 876 int cpu, vector; 878 877 879 - BUG_ON((unsigned)irq >= NR_IRQS); 878 + BUG_ON((unsigned)irq >= nr_irqs); 880 879 cfg = &irq_cfg[irq]; 881 880 BUG_ON(!cfg->vector); 882 881 ··· 896 895 int irq, vector; 897 896 898 897 /* Mark the inuse vectors */ 899 - for (irq = 0; irq < NR_IRQS; ++irq) { 898 + for (irq = 0; irq < nr_irqs; ++irq) { 900 899 if (!cpu_isset(cpu, irq_cfg[irq].domain)) 901 900 continue; 902 901 vector = irq_cfg[irq].vector; ··· 1194 1193 } 1195 1194 } 1196 1195 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1197 - for (i = 0; i < NR_IRQS; i++) { 1196 + for (i = 0; i < nr_irqs; i++) { 1198 1197 struct irq_pin_list *entry = irq_2_pin + i; 1199 1198 if (entry->pin < 0) 1200 1199 continue; ··· 1367 1366 int i, apic; 1368 1367 unsigned long flags; 1369 1368 1370 - for (i = 0; i < PIN_MAP_SIZE; i++) { 1369 + for (i = 0; i < pin_map_size; i++) { 1371 1370 irq_2_pin[i].pin = -1; 1372 1371 irq_2_pin[i].next = 0; 1373 1372 } ··· 1659 1658 { 1660 1659 int irq; 1661 1660 1662 - for (irq = 0; irq < NR_IRQS; irq++) { 1661 + for (irq = 0; irq < nr_irqs; irq++) { 1663 1662 struct irq_desc *desc = irq_desc + irq; 1664 1663 if (desc->status & IRQ_MOVE_PENDING) { 1665 1664 unsigned long flags; ··· 1708 1707 struct irq_desc *desc; 1709 1708 struct irq_cfg *cfg; 1710 1709 irq = __get_cpu_var(vector_irq)[vector]; 1711 - if (irq >= NR_IRQS) 1710 + if (irq >= nr_irqs) 1712 1711 continue; 1713 1712 1714 1713 desc = irq_desc + irq; ··· 1866 1865 * Also, we've got to be careful not to trash gate 1867 1866 * 0x80, because int 0x80 is hm, kind of importantish. ;) 1868 1867 */ 1869 - for (irq = 0; irq < NR_IRQS ; irq++) { 1868 + for (irq = 0; irq < nr_irqs ; irq++) { 1870 1869 if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) { 1871 1870 /* 1872 1871 * Hmm.. 
We don't have an entry for this, ··· 2280 2279 2281 2280 irq = -ENOSPC; 2282 2281 spin_lock_irqsave(&vector_lock, flags); 2283 - for (new = (NR_IRQS - 1); new >= 0; new--) { 2282 + for (new = (nr_irqs - 1); new >= 0; new--) { 2284 2283 if (platform_legacy_irq(new)) 2285 2284 continue; 2286 2285 if (irq_cfg[new].vector != 0)
+4 -4
arch/x86/kernel/irq_32.c
··· 226 226 int overflow, irq = ~regs->orig_ax; 227 227 struct irq_desc *desc = irq_desc + irq; 228 228 229 - if (unlikely((unsigned)irq >= NR_IRQS)) { 229 + if (unlikely((unsigned)irq >= nr_irqs)) { 230 230 printk(KERN_EMERG "%s: cannot handle IRQ %d\n", 231 231 __func__, irq); 232 232 BUG(); ··· 271 271 seq_putc(p, '\n'); 272 272 } 273 273 274 - if (i < NR_IRQS) { 274 + if (i < nr_irqs) { 275 275 unsigned any_count = 0; 276 276 277 277 spin_lock_irqsave(&irq_desc[i].lock, flags); ··· 303 303 seq_putc(p, '\n'); 304 304 skip: 305 305 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 306 - } else if (i == NR_IRQS) { 306 + } else if (i == nr_irqs) { 307 307 seq_printf(p, "NMI: "); 308 308 for_each_online_cpu(j) 309 309 seq_printf(p, "%10u ", nmi_count(j)); ··· 396 396 unsigned int irq; 397 397 static int warned; 398 398 399 - for (irq = 0; irq < NR_IRQS; irq++) { 399 + for (irq = 0; irq < nr_irqs; irq++) { 400 400 cpumask_t mask; 401 401 if (irq == 2) 402 402 continue;
+4 -4
arch/x86/kernel/irq_64.c
··· 81 81 seq_putc(p, '\n'); 82 82 } 83 83 84 - if (i < NR_IRQS) { 84 + if (i < nr_irqs) { 85 85 unsigned any_count = 0; 86 86 87 87 spin_lock_irqsave(&irq_desc[i].lock, flags); ··· 112 112 seq_putc(p, '\n'); 113 113 skip: 114 114 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 115 - } else if (i == NR_IRQS) { 115 + } else if (i == nr_irqs) { 116 116 seq_printf(p, "NMI: "); 117 117 for_each_online_cpu(j) 118 118 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); ··· 201 201 stack_overflow_check(regs); 202 202 #endif 203 203 204 - if (likely(irq < NR_IRQS)) 204 + if (likely(irq < nr_irqs)) 205 205 generic_handle_irq(irq); 206 206 else { 207 207 if (!disable_apic) ··· 224 224 unsigned int irq; 225 225 static int warned; 226 226 227 - for (irq = 0; irq < NR_IRQS; irq++) { 227 + for (irq = 0; irq < nr_irqs; irq++) { 228 228 cpumask_t mask; 229 229 int break_affinity = 0; 230 230 int set_affinity = 1;
+1 -1
arch/x86/kernel/irqinit_32.c
··· 100 100 */ 101 101 for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { 102 102 int vector = FIRST_EXTERNAL_VECTOR + i; 103 - if (i >= NR_IRQS) 103 + if (i >= nr_irqs) 104 104 break; 105 105 /* SYSCALL_VECTOR was reserved in trap_init. */ 106 106 if (!test_bit(vector, used_vectors))
+1 -1
arch/x86/kernel/irqinit_64.c
··· 142 142 init_bsp_APIC(); 143 143 init_8259A(0); 144 144 145 - for (i = 0; i < NR_IRQS; i++) { 145 + for (i = 0; i < nr_irqs; i++) { 146 146 irq_desc[i].status = IRQ_DISABLED; 147 147 irq_desc[i].action = NULL; 148 148 irq_desc[i].depth = 1;
+3
include/asm-x86/irq.h
··· 10 10 #include <asm/apicdef.h> 11 11 #include <asm/irq_vectors.h> 12 12 13 + extern int pin_map_size; 14 + extern int first_free_entry; 15 + 13 16 static inline int irq_canonicalize(int irq) 14 17 { 15 18 return ((irq == 2) ? 9 : irq);