Merge branch 'irq-fixes-for-linus-4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-fixes-for-linus-4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sparseirq: move __weak symbols into separate compilation unit
sparseirq: work around __weak alias bug
sparseirq: fix hang with !SPARSE_IRQ
sparseirq: set lock_class for legacy irq when sparse_irq is selected
sparseirq: work around compiler optimizing away __weak functions
sparseirq: fix desc->lock init
sparseirq: do not printk when migrating IRQ descriptors
sparseirq: remove duplicated arch_early_irq_init()
irq: simplify for_each_irq_desc() usage
proc: remove ifdef CONFIG_SPARSE_IRQ from stat.c
irq: for_each_irq_desc() move to irqnr.h
hrtimer: remove #include <linux/irq.h>

+84 -109
+6 -12
arch/x86/kernel/io_apic.c
··· 170 170 [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, 171 171 }; 172 172 173 - void __init arch_early_irq_init(void) 173 + int __init arch_early_irq_init(void) 174 174 { 175 175 struct irq_cfg *cfg; 176 176 struct irq_desc *desc; ··· 184 184 desc = irq_to_desc(i); 185 185 desc->chip_data = &cfg[i]; 186 186 } 187 + 188 + return 0; 187 189 } 188 190 189 191 #ifdef CONFIG_SPARSE_IRQ ··· 214 212 return cfg; 215 213 } 216 214 217 - void arch_init_chip_data(struct irq_desc *desc, int cpu) 215 + int arch_init_chip_data(struct irq_desc *desc, int cpu) 218 216 { 219 217 struct irq_cfg *cfg; 220 218 ··· 226 224 BUG_ON(1); 227 225 } 228 226 } 227 + 228 + return 0; 229 229 } 230 230 231 231 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC ··· 1349 1345 1350 1346 /* Mark the inuse vectors */ 1351 1347 for_each_irq_desc(irq, desc) { 1352 - if (!desc) 1353 - continue; 1354 1348 cfg = desc->chip_data; 1355 1349 if (!cpu_isset(cpu, cfg->domain)) 1356 1350 continue; ··· 1732 1730 for_each_irq_desc(irq, desc) { 1733 1731 struct irq_pin_list *entry; 1734 1732 1735 - if (!desc) 1736 - continue; 1737 1733 cfg = desc->chip_data; 1738 1734 entry = cfg->irq_2_pin; 1739 1735 if (!entry) ··· 2378 2378 struct irq_desc *desc; 2379 2379 2380 2380 for_each_irq_desc(irq, desc) { 2381 - if (!desc) 2382 - continue; 2383 - 2384 2381 if (desc->status & IRQ_MOVE_PENDING) { 2385 2382 unsigned long flags; 2386 2383 ··· 2667 2670 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2668 2671 */ 2669 2672 for_each_irq_desc(irq, desc) { 2670 - if (!desc) 2671 - continue; 2672 - 2673 2673 cfg = desc->chip_data; 2674 2674 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2675 2675 /*
-3
drivers/xen/events.c
··· 142 142 143 143 /* By default all event channels notify CPU#0. */ 144 144 for_each_irq_desc(i, desc) { 145 - if (!desc) 146 - continue; 147 - 148 145 desc->affinity = cpumask_of_cpu(0); 149 146 } 150 147 #endif
+1 -10
fs/proc/stat.c
··· 9 9 #include <linux/seq_file.h> 10 10 #include <linux/slab.h> 11 11 #include <linux/time.h> 12 + #include <linux/irqnr.h> 12 13 #include <asm/cputime.h> 13 14 14 15 #ifndef arch_irq_stat_cpu ··· 46 45 steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); 47 46 guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); 48 47 for_each_irq_nr(j) { 49 - #ifdef CONFIG_SPARSE_IRQ 50 - if (!irq_to_desc(j)) 51 - continue; 52 - #endif 53 48 sum += kstat_irqs_cpu(j, i); 54 49 } 55 50 sum += arch_irq_stat_cpu(i); ··· 92 95 /* sum again ? it could be updated? */ 93 96 for_each_irq_nr(j) { 94 97 per_irq_sum = 0; 95 - #ifdef CONFIG_SPARSE_IRQ 96 - if (!irq_to_desc(j)) { 97 - seq_printf(p, " %u", per_irq_sum); 98 - continue; 99 - } 100 - #endif 101 98 for_each_possible_cpu(i) 102 99 per_irq_sum += kstat_irqs_cpu(j, i); 103 100
+6
include/linux/interrupt.h
··· 464 464 465 465 int show_interrupts(struct seq_file *p, void *v); 466 466 467 + struct irq_desc; 468 + 469 + extern int early_irq_init(void); 470 + extern int arch_early_irq_init(void); 471 + extern int arch_init_chip_data(struct irq_desc *desc, int cpu); 472 + 467 473 #endif
+4 -23
include/linux/irq.h
··· 193 193 const char *name; 194 194 } ____cacheline_internodealigned_in_smp; 195 195 196 - extern void early_irq_init(void); 197 - extern void arch_early_irq_init(void); 198 - extern void arch_init_chip_data(struct irq_desc *desc, int cpu); 199 196 extern void arch_init_copy_chip_data(struct irq_desc *old_desc, 200 197 struct irq_desc *desc, int cpu); 201 198 extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); 202 199 203 200 #ifndef CONFIG_SPARSE_IRQ 204 201 extern struct irq_desc irq_desc[NR_IRQS]; 205 - 206 - static inline struct irq_desc *irq_to_desc(unsigned int irq) 207 - { 208 - return (irq < NR_IRQS) ? irq_desc + irq : NULL; 209 - } 210 - static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) 211 - { 212 - return irq_to_desc(irq); 213 - } 214 - 215 - #else 216 - 217 - extern struct irq_desc *irq_to_desc(unsigned int irq); 218 - extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); 202 + #else /* CONFIG_SPARSE_IRQ */ 219 203 extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); 220 - 221 - # define for_each_irq_desc(irq, desc) \ 222 - for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq)) 223 - # define for_each_irq_desc_reverse(irq, desc) \ 224 - for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq)) 225 204 226 205 #define kstat_irqs_this_cpu(DESC) \ 227 206 ((DESC)->kstat_irqs[smp_processor_id()]) 228 207 #define kstat_incr_irqs_this_cpu(irqno, DESC) \ 229 208 ((DESC)->kstat_irqs[smp_processor_id()]++) 230 209 231 - #endif 210 + #endif /* CONFIG_SPARSE_IRQ */ 211 + 212 + extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); 232 213 233 214 static inline struct irq_desc * 234 215 irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
+13 -10
include/linux/irqnr.h
··· 15 15 16 16 # define for_each_irq_desc_reverse(irq, desc) \ 17 17 for (irq = nr_irqs - 1; irq >= 0; irq--) 18 - #else 18 + #else /* CONFIG_GENERIC_HARDIRQS */ 19 19 20 20 extern int nr_irqs; 21 + extern struct irq_desc *irq_to_desc(unsigned int irq); 21 22 22 - #ifndef CONFIG_SPARSE_IRQ 23 + # define for_each_irq_desc(irq, desc) \ 24 + for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ 25 + irq++, desc = irq_to_desc(irq)) \ 26 + if (desc) 23 27 24 - struct irq_desc; 25 - # define for_each_irq_desc(irq, desc) \ 26 - for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) 27 - # define for_each_irq_desc_reverse(irq, desc) \ 28 - for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ 29 - irq >= 0; irq--, desc--) 30 - #endif 31 - #endif 28 + 29 + # define for_each_irq_desc_reverse(irq, desc) \ 30 + for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ 31 + irq--, desc = irq_to_desc(irq)) \ 32 + if (desc) 33 + 34 + #endif /* CONFIG_GENERIC_HARDIRQS */ 32 35 33 36 #define for_each_irq_nr(irq) \ 34 37 for (irq = 0; irq < nr_irqs; irq++)
-9
init/main.c
··· 540 540 { 541 541 } 542 542 543 - void __init __weak arch_early_irq_init(void) 544 - { 545 - } 546 - 547 - void __init __weak early_irq_init(void) 548 - { 549 - arch_early_irq_init(); 550 - } 551 - 552 543 asmlinkage void __init start_kernel(void) 553 544 { 554 545 char * command_line;
-1
kernel/hrtimer.c
··· 32 32 */ 33 33 34 34 #include <linux/cpu.h> 35 - #include <linux/irq.h> 36 35 #include <linux/module.h> 37 36 #include <linux/percpu.h> 38 37 #include <linux/hrtimer.h>
-15
kernel/irq/autoprobe.c
··· 40 40 * flush such a longstanding irq before considering it as spurious. 41 41 */ 42 42 for_each_irq_desc_reverse(i, desc) { 43 - if (!desc) 44 - continue; 45 - 46 43 spin_lock_irq(&desc->lock); 47 44 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 48 45 /* ··· 68 71 * happened in the previous stage, it may have masked itself) 69 72 */ 70 73 for_each_irq_desc_reverse(i, desc) { 71 - if (!desc) 72 - continue; 73 - 74 74 spin_lock_irq(&desc->lock); 75 75 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 76 76 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; ··· 86 92 * Now filter out any obviously spurious interrupts 87 93 */ 88 94 for_each_irq_desc(i, desc) { 89 - if (!desc) 90 - continue; 91 - 92 95 spin_lock_irq(&desc->lock); 93 96 status = desc->status; 94 97 ··· 124 133 int i; 125 134 126 135 for_each_irq_desc(i, desc) { 127 - if (!desc) 128 - continue; 129 - 130 136 spin_lock_irq(&desc->lock); 131 137 status = desc->status; 132 138 ··· 166 178 unsigned int status; 167 179 168 180 for_each_irq_desc(i, desc) { 169 - if (!desc) 170 - continue; 171 - 172 181 spin_lock_irq(&desc->lock); 173 182 status = desc->status; 174 183
+32 -16
kernel/irq/handle.c
··· 56 56 int nr_irqs = NR_IRQS; 57 57 EXPORT_SYMBOL_GPL(nr_irqs); 58 58 59 - void __init __attribute__((weak)) arch_early_irq_init(void) 60 - { 61 - } 62 - 63 59 #ifdef CONFIG_SPARSE_IRQ 64 60 static struct irq_desc irq_desc_init = { 65 61 .irq = -1, ··· 86 90 desc->kstat_irqs = (unsigned int *)ptr; 87 91 } 88 92 89 - void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu) 90 - { 91 - } 92 - 93 93 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) 94 94 { 95 95 memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); 96 + 97 + spin_lock_init(&desc->lock); 96 98 desc->irq = irq; 97 99 #ifdef CONFIG_SMP 98 100 desc->cpu = cpu; ··· 128 134 /* FIXME: use bootmem alloc ...*/ 129 135 static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS]; 130 136 131 - void __init early_irq_init(void) 137 + int __init early_irq_init(void) 132 138 { 133 139 struct irq_desc *desc; 134 140 int legacy_count; ··· 140 146 for (i = 0; i < legacy_count; i++) { 141 147 desc[i].irq = i; 142 148 desc[i].kstat_irqs = kstat_irqs_legacy[i]; 149 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 143 150 144 151 irq_desc_ptrs[i] = desc + i; 145 152 } ··· 148 153 for (i = legacy_count; i < NR_IRQS; i++) 149 154 irq_desc_ptrs[i] = NULL; 150 155 151 - arch_early_irq_init(); 156 + return arch_early_irq_init(); 152 157 } 153 158 154 159 struct irq_desc *irq_to_desc(unsigned int irq) ··· 198 203 return desc; 199 204 } 200 205 201 - #else 206 + #else /* !CONFIG_SPARSE_IRQ */ 202 207 203 208 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 204 209 [0 ... NR_IRQS-1] = { ··· 213 218 } 214 219 }; 215 220 216 - #endif 221 + int __init early_irq_init(void) 222 + { 223 + struct irq_desc *desc; 224 + int count; 225 + int i; 226 + 227 + desc = irq_desc; 228 + count = ARRAY_SIZE(irq_desc); 229 + 230 + for (i = 0; i < count; i++) 231 + desc[i].irq = i; 232 + 233 + return arch_early_irq_init(); 234 + } 235 + 236 + struct irq_desc *irq_to_desc(unsigned int irq) 237 + { 238 + return (irq < NR_IRQS) ? irq_desc + irq : NULL; 239 + } 240 + 241 + struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) 242 + { 243 + return irq_to_desc(irq); 244 + } 245 + #endif /* !CONFIG_SPARSE_IRQ */ 217 246 218 247 /* 219 248 * What should we do if we get a hw irq event on an illegal vector? ··· 447 428 int i; 448 429 449 430 for_each_irq_desc(i, desc) { 450 - if (!desc) 451 - continue; 452 - 453 431 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 454 432 } 455 433 } ··· 455 439 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 456 440 { 457 441 struct irq_desc *desc = irq_to_desc(irq); 458 - return desc->kstat_irqs[cpu]; 442 + return desc ? desc->kstat_irqs[cpu] : 0; 459 443 } 460 444 #endif 461 445 EXPORT_SYMBOL(kstat_irqs_cpu);
+2 -5
kernel/irq/numa_migrate.c
··· 42 42 struct irq_desc *desc, int cpu) 43 43 { 44 44 memcpy(desc, old_desc, sizeof(struct irq_desc)); 45 + spin_lock_init(&desc->lock); 45 46 desc->cpu = cpu; 46 47 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 47 48 init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); ··· 75 74 76 75 node = cpu_to_node(cpu); 77 76 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); 78 - printk(KERN_DEBUG " move irq_desc for %d to cpu %d node %d\n", 79 - irq, cpu, node); 80 77 if (!desc) { 81 - printk(KERN_ERR "can not get new irq_desc for moving\n"); 78 + printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq); 82 79 /* still use old one */ 83 80 desc = old_desc; 84 81 goto out_unlock; ··· 105 106 return desc; 106 107 107 108 old_cpu = desc->cpu; 108 - printk(KERN_DEBUG 109 - "try to move irq_desc from cpu %d to %d\n", old_cpu, cpu); 110 109 if (old_cpu != cpu) { 111 110 node = cpu_to_node(cpu); 112 111 old_node = cpu_to_node(old_cpu);
-5
kernel/irq/spurious.c
··· 91 91 int i, ok = 0; 92 92 93 93 for_each_irq_desc(i, desc) { 94 - if (!desc) 95 - continue; 96 - 97 94 if (!i) 98 95 continue; 99 96 ··· 112 115 for_each_irq_desc(i, desc) { 113 116 unsigned int status; 114 117 115 - if (!desc) 116 - continue; 117 118 if (!i) 118 119 continue; 119 120
+20
kernel/softirq.c
··· 784 784 } 785 785 EXPORT_SYMBOL(on_each_cpu); 786 786 #endif 787 + 788 + /* 789 + * [ These __weak aliases are kept in a separate compilation unit, so that 790 + * GCC does not inline them incorrectly. ] 791 + */ 792 + 793 + int __init __weak early_irq_init(void) 794 + { 795 + return 0; 796 + } 797 + 798 + int __init __weak arch_early_irq_init(void) 799 + { 800 + return 0; 801 + } 802 + 803 + int __weak arch_init_chip_data(struct irq_desc *desc, int cpu) 804 + { 805 + return 0; 806 + }