Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

genirq: revert dynarray

Revert the dynarray changes. They need more thought and polishing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

authored by

Thomas Gleixner and committed by
Ingo Molnar
d6c88a50 ee32c973

+103 -496
-4
arch/Kconfig
··· 102 102 help 103 103 The <linux/clk.h> calls support software clock gating and 104 104 thus are a key power management tool on many systems. 105 - 106 - config HAVE_DYN_ARRAY 107 - def_bool n 108 -
-1
arch/x86/Kconfig
··· 33 33 select HAVE_ARCH_TRACEHOOK 34 34 select HAVE_GENERIC_DMA_COHERENT if X86_32 35 35 select HAVE_EFFICIENT_UNALIGNED_ACCESS 36 - select HAVE_DYN_ARRAY 37 36 38 37 config ARCH_DEFCONFIG 39 38 string
+74 -125
arch/x86/kernel/io_apic.c
··· 107 107 } 108 108 early_param("noapic", parse_noapic); 109 109 110 - struct irq_cfg; 111 110 struct irq_pin_list; 112 111 struct irq_cfg { 113 112 unsigned int irq; ··· 119 120 }; 120 121 121 122 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ 122 - static struct irq_cfg irq_cfg_legacy[] __initdata = { 123 + static struct irq_cfg irq_cfgx[NR_IRQS] = { 123 124 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, 124 125 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, 125 126 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, ··· 138 139 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, 139 140 }; 140 141 141 - static struct irq_cfg irq_cfg_init = { .irq = -1U, }; 142 - 143 - static void init_one_irq_cfg(struct irq_cfg *cfg) 144 - { 145 - memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg)); 146 - } 147 - 148 - static struct irq_cfg *irq_cfgx; 149 - 150 - static void __init init_work(void *data) 151 - { 152 - struct dyn_array *da = data; 153 - struct irq_cfg *cfg; 154 - int legacy_count; 155 - int i; 156 - 157 - cfg = *da->name; 158 - 159 - memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy)); 160 - 161 - legacy_count = ARRAY_SIZE(irq_cfg_legacy); 162 - for (i = legacy_count; i < *da->nr; i++) 163 - init_one_irq_cfg(&cfg[i]); 164 - } 165 - 166 142 #define for_each_irq_cfg(irq, cfg) \ 167 - for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq]) 143 + for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++) 168 144 169 - DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work); 170 - 171 - struct irq_cfg *irq_cfg(unsigned int irq) 145 + static struct irq_cfg *irq_cfg(unsigned int irq) 172 146 { 173 - if (irq < nr_irqs) 174 - return &irq_cfgx[irq]; 175 - 176 - return NULL; 147 + return irq < nr_irqs ? 
irq_cfgx + irq : NULL; 177 148 } 178 - struct irq_cfg *irq_cfg_alloc(unsigned int irq) 149 + 150 + static struct irq_cfg *irq_cfg_alloc(unsigned int irq) 179 151 { 180 152 return irq_cfg(irq); 181 153 } 154 + 155 + /* 156 + * Rough estimation of how many shared IRQs there are, can be changed 157 + * anytime. 158 + */ 159 + #define MAX_PLUS_SHARED_IRQS NR_IRQS 160 + #define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) 182 161 183 162 /* 184 163 * This is performance-critical, we want to do it O(1) ··· 170 193 struct irq_pin_list *next; 171 194 }; 172 195 173 - static struct irq_pin_list *irq_2_pin_head; 174 - /* fill one page ? */ 175 - static int nr_irq_2_pin = 0x100; 196 + static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE]; 176 197 static struct irq_pin_list *irq_2_pin_ptr; 177 - static void __init irq_2_pin_init_work(void *data) 198 + 199 + static void __init irq_2_pin_init(void) 178 200 { 179 - struct dyn_array *da = data; 180 - struct irq_pin_list *pin; 201 + struct irq_pin_list *pin = irq_2_pin_head; 181 202 int i; 182 203 183 - pin = *da->name; 184 - 185 - for (i = 1; i < *da->nr; i++) 204 + for (i = 1; i < PIN_MAP_SIZE; i++) 186 205 pin[i-1].next = &pin[i]; 187 206 188 207 irq_2_pin_ptr = &pin[0]; 189 208 } 190 - DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work); 191 209 192 210 static struct irq_pin_list *get_one_free_irq_2_pin(void) 193 211 { 194 - struct irq_pin_list *pin; 195 - int i; 196 - 197 - pin = irq_2_pin_ptr; 198 - 199 - if (pin) { 200 - irq_2_pin_ptr = pin->next; 201 - pin->next = NULL; 202 - return pin; 203 - } 204 - 205 - /* 206 - * we run out of pre-allocate ones, allocate more 207 - */ 208 - printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin); 209 - 210 - if (after_bootmem) 211 - pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin, 212 - GFP_ATOMIC); 213 - else 214 - pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) * 215 - nr_irq_2_pin, PAGE_SIZE, 0); 
212 + struct irq_pin_list *pin = irq_2_pin_ptr; 216 213 217 214 if (!pin) 218 215 panic("can not get more irq_2_pin\n"); 219 216 220 - for (i = 1; i < nr_irq_2_pin; i++) 221 - pin[i-1].next = &pin[i]; 222 - 223 217 irq_2_pin_ptr = pin->next; 224 218 pin->next = NULL; 225 - 226 219 return pin; 227 220 } 228 221 ··· 231 284 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) 232 285 { 233 286 struct io_apic __iomem *io_apic = io_apic_base(apic); 234 - if (sis_apic_bug) 235 - writel(reg, &io_apic->index); 287 + 288 + if (sis_apic_bug) 289 + writel(reg, &io_apic->index); 236 290 writel(value, &io_apic->data); 237 291 } 238 292 ··· 992 1044 while (i < apic) 993 1045 irq += nr_ioapic_registers[i++]; 994 1046 irq += pin; 995 - /* 1047 + /* 996 1048 * For MPS mode, so far only needed by ES7000 platform 997 1049 */ 998 - if (ioapic_renumber_irq) 999 - irq = ioapic_renumber_irq(apic, irq); 1050 + if (ioapic_renumber_irq) 1051 + irq = ioapic_renumber_irq(apic, irq); 1000 1052 } 1001 1053 1002 1054 #ifdef CONFIG_X86_32 ··· 1180 1232 #ifdef CONFIG_X86_32 1181 1233 static inline int IO_APIC_irq_trigger(int irq) 1182 1234 { 1183 - int apic, idx, pin; 1235 + int apic, idx, pin; 1184 1236 1185 - for (apic = 0; apic < nr_ioapics; apic++) { 1186 - for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1187 - idx = find_irq_entry(apic, pin, mp_INT); 1188 - if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) 1189 - return irq_trigger(idx); 1190 - } 1191 - } 1192 - /* 1237 + for (apic = 0; apic < nr_ioapics; apic++) { 1238 + for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1239 + idx = find_irq_entry(apic, pin, mp_INT); 1240 + if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) 1241 + return irq_trigger(idx); 1242 + } 1243 + } 1244 + /* 1193 1245 * nonexistent IRQs are edge default 1194 1246 */ 1195 - return 0; 1247 + return 0; 1196 1248 } 1197 1249 #else 1198 1250 static inline int IO_APIC_irq_trigger(int irq) ··· 1457 1509 
reg_01.raw = io_apic_read(apic, 1); 1458 1510 if (reg_01.bits.version >= 0x10) 1459 1511 reg_02.raw = io_apic_read(apic, 2); 1460 - if (reg_01.bits.version >= 0x20) 1461 - reg_03.raw = io_apic_read(apic, 3); 1512 + if (reg_01.bits.version >= 0x20) 1513 + reg_03.raw = io_apic_read(apic, 3); 1462 1514 spin_unlock_irqrestore(&ioapic_lock, flags); 1463 1515 1464 1516 printk("\n"); ··· 2037 2089 #else 2038 2090 static int ioapic_retrigger_irq(unsigned int irq) 2039 2091 { 2040 - send_IPI_self(irq_cfg(irq)->vector); 2092 + send_IPI_self(irq_cfg(irq)->vector); 2041 2093 2042 - return 1; 2094 + return 1; 2043 2095 } 2044 2096 #endif 2045 2097 ··· 2137 2189 2138 2190 if (io_apic_level_ack_pending(irq)) { 2139 2191 /* 2140 - * Interrupt in progress. Migrating irq now will change the 2192 + * Interrupt in progress. Migrating irq now will change the 2141 2193 * vector information in the IO-APIC RTE and that will confuse 2142 2194 * the EOI broadcast performed by cpu. 2143 2195 * So, delay the irq migration to the next instance. 
··· 2374 2426 } 2375 2427 2376 2428 static struct irq_chip ioapic_chip __read_mostly = { 2377 - .name = "IO-APIC", 2378 - .startup = startup_ioapic_irq, 2379 - .mask = mask_IO_APIC_irq, 2380 - .unmask = unmask_IO_APIC_irq, 2381 - .ack = ack_apic_edge, 2382 - .eoi = ack_apic_level, 2429 + .name = "IO-APIC", 2430 + .startup = startup_ioapic_irq, 2431 + .mask = mask_IO_APIC_irq, 2432 + .unmask = unmask_IO_APIC_irq, 2433 + .ack = ack_apic_edge, 2434 + .eoi = ack_apic_level, 2383 2435 #ifdef CONFIG_SMP 2384 - .set_affinity = set_ioapic_affinity_irq, 2436 + .set_affinity = set_ioapic_affinity_irq, 2385 2437 #endif 2386 2438 .retrigger = ioapic_retrigger_irq, 2387 2439 }; 2388 2440 2389 2441 #ifdef CONFIG_INTR_REMAP 2390 2442 static struct irq_chip ir_ioapic_chip __read_mostly = { 2391 - .name = "IR-IO-APIC", 2392 - .startup = startup_ioapic_irq, 2393 - .mask = mask_IO_APIC_irq, 2394 - .unmask = unmask_IO_APIC_irq, 2395 - .ack = ack_x2apic_edge, 2396 - .eoi = ack_x2apic_level, 2443 + .name = "IR-IO-APIC", 2444 + .startup = startup_ioapic_irq, 2445 + .mask = mask_IO_APIC_irq, 2446 + .unmask = unmask_IO_APIC_irq, 2447 + .ack = ack_x2apic_edge, 2448 + .eoi = ack_x2apic_level, 2397 2449 #ifdef CONFIG_SMP 2398 - .set_affinity = set_ir_ioapic_affinity_irq, 2450 + .set_affinity = set_ir_ioapic_affinity_irq, 2399 2451 #endif 2400 2452 .retrigger = ioapic_retrigger_irq, 2401 2453 }; ··· 2584 2636 2585 2637 local_irq_save(flags); 2586 2638 2587 - ver = apic_read(APIC_LVR); 2588 - ver = GET_APIC_VERSION(ver); 2639 + ver = apic_read(APIC_LVR); 2640 + ver = GET_APIC_VERSION(ver); 2589 2641 2590 2642 /* 2591 2643 * get/set the timer IRQ vector: ··· 2770 2822 io_apic_irqs = ~PIC_IRQS; 2771 2823 2772 2824 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2773 - /* 2825 + /* 2774 2826 * Set up IO-APIC IRQ routing. 
2775 2827 */ 2776 2828 #ifdef CONFIG_X86_32 2777 - if (!acpi_ioapic) 2778 - setup_ioapic_ids_from_mpc(); 2829 + if (!acpi_ioapic) 2830 + setup_ioapic_ids_from_mpc(); 2779 2831 #endif 2780 2832 sync_Arb_IDs(); 2781 2833 setup_IO_APIC_irqs(); ··· 2790 2842 2791 2843 static int __init io_apic_bug_finalize(void) 2792 2844 { 2793 - if (sis_apic_bug == -1) 2794 - sis_apic_bug = 0; 2795 - return 0; 2845 + if (sis_apic_bug == -1) 2846 + sis_apic_bug = 0; 2847 + return 0; 2796 2848 } 2797 2849 2798 2850 late_initcall(io_apic_bug_finalize); ··· 3147 3199 if (index < 0) { 3148 3200 printk(KERN_ERR 3149 3201 "Unable to allocate %d IRTE for PCI %s\n", nvec, 3150 - pci_name(dev)); 3202 + pci_name(dev)); 3151 3203 return -ENOSPC; 3152 3204 } 3153 3205 return index; ··· 3833 3885 void __init ioapic_init_mappings(void) 3834 3886 { 3835 3887 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3836 - int i; 3837 3888 struct resource *ioapic_res; 3889 + int i; 3838 3890 3891 + irq_2_pin_init(); 3839 3892 ioapic_res = ioapic_setup_resources(); 3840 3893 for (i = 0; i < nr_ioapics; i++) { 3841 3894 if (smp_found_config) { 3842 3895 ioapic_phys = mp_ioapics[i].mp_apicaddr; 3843 3896 #ifdef CONFIG_X86_32 3844 - if (!ioapic_phys) { 3845 - printk(KERN_ERR 3846 - "WARNING: bogus zero IO-APIC " 3847 - "address found in MPTABLE, " 3848 - "disabling IO/APIC support!\n"); 3849 - smp_found_config = 0; 3850 - skip_ioapic_setup = 1; 3851 - goto fake_ioapic_page; 3852 - } 3897 + if (!ioapic_phys) { 3898 + printk(KERN_ERR 3899 + "WARNING: bogus zero IO-APIC " 3900 + "address found in MPTABLE, " 3901 + "disabling IO/APIC support!\n"); 3902 + smp_found_config = 0; 3903 + skip_ioapic_setup = 1; 3904 + goto fake_ioapic_page; 3905 + } 3853 3906 #endif 3854 3907 } else { 3855 3908 #ifdef CONFIG_X86_32
+2 -6
arch/x86/kernel/setup_percpu.c
··· 140 140 */ 141 141 void __init setup_per_cpu_areas(void) 142 142 { 143 - ssize_t size, old_size, da_size; 143 + ssize_t size, old_size; 144 144 char *ptr; 145 145 int cpu; 146 146 unsigned long align = 1; ··· 150 150 151 151 /* Copy section for each CPU (we discard the original) */ 152 152 old_size = PERCPU_ENOUGH_ROOM; 153 - da_size = per_cpu_dyn_array_size(&align); 154 153 align = max_t(unsigned long, PAGE_SIZE, align); 155 - size = roundup(old_size + da_size, align); 154 + size = roundup(old_size, align); 156 155 printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", 157 156 size); 158 157 ··· 181 182 #endif 182 183 per_cpu_offset(cpu) = ptr - __per_cpu_start; 183 184 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 184 - 185 - per_cpu_alloc_dyn_array(cpu, ptr + old_size); 186 - 187 185 } 188 186 189 187 printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
+1 -1
arch/x86/kernel/visws_quirks.c
··· 633 633 /* 634 634 * handle this 'virtual interrupt' as a Cobalt one now. 635 635 */ 636 - kstat_irqs_this_cpu(desc)++; 636 + kstat_incr_irqs_this_cpu(realirq, desc); 637 637 638 638 if (likely(desc->action != NULL)) 639 639 handle_IRQ_event(realirq, desc->action);
-1
arch/x86/kernel/vmlinux_32.lds.S
··· 145 145 *(.x86_cpu_dev.init) 146 146 __x86_cpu_dev_end = .; 147 147 } 148 - DYN_ARRAY_INIT(8) 149 148 SECURITY_INIT 150 149 . = ALIGN(4); 151 150 .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
-2
arch/x86/kernel/vmlinux_64.lds.S
··· 174 174 } 175 175 __x86_cpu_dev_end = .; 176 176 177 - DYN_ARRAY_INIT(8) 178 - 179 177 SECURITY_INIT 180 178 181 179 . = ALIGN(8);
+1 -1
arch/x86/xen/spinlock.c
··· 241 241 ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq)); 242 242 } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */ 243 243 244 - kstat_irqs_this_cpu(irq_to_desc(irq))++; 244 + kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); 245 245 246 246 out: 247 247 raw_local_irq_restore(flags);
-5
drivers/char/random.c
··· 558 558 unsigned dont_count_entropy:1; 559 559 }; 560 560 561 - #ifdef CONFIG_HAVE_DYN_ARRAY 562 - static struct timer_rand_state **irq_timer_state; 563 - DEFINE_DYN_ARRAY(irq_timer_state, sizeof(struct timer_rand_state *), nr_irqs, PAGE_SIZE, NULL); 564 - #else 565 561 static struct timer_rand_state *irq_timer_state[NR_IRQS]; 566 - #endif 567 562 568 563 static struct timer_rand_state *get_timer_rand_state(unsigned int irq) 569 564 {
+2 -9
drivers/pci/intr_remapping.c
··· 19 19 u8 irte_mask; 20 20 }; 21 21 22 - #ifdef CONFIG_HAVE_DYN_ARRAY 23 - static struct irq_2_iommu *irq_2_iommuX; 24 - DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL); 25 - #else 26 22 static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; 27 - #endif 28 23 29 24 static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 30 25 { 31 - if (irq < nr_irqs) 32 - return &irq_2_iommuX[irq]; 33 - 34 - return NULL; 26 + return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL; 35 27 } 28 + 36 29 static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) 37 30 { 38 31 return irq_2_iommu(irq);
-13
include/asm-generic/vmlinux.lds.h
··· 210 210 * All archs are supposed to use RO_DATA() */ 211 211 #define RODATA RO_DATA(4096) 212 212 213 - #define DYN_ARRAY_INIT(align) \ 214 - . = ALIGN((align)); \ 215 - .dyn_array.init : AT(ADDR(.dyn_array.init) - LOAD_OFFSET) { \ 216 - VMLINUX_SYMBOL(__dyn_array_start) = .; \ 217 - *(.dyn_array.init) \ 218 - VMLINUX_SYMBOL(__dyn_array_end) = .; \ 219 - } \ 220 - . = ALIGN((align)); \ 221 - .per_cpu_dyn_array.init : AT(ADDR(.per_cpu_dyn_array.init) - LOAD_OFFSET) { \ 222 - VMLINUX_SYMBOL(__per_cpu_dyn_array_start) = .; \ 223 - *(.per_cpu_dyn_array.init) \ 224 - VMLINUX_SYMBOL(__per_cpu_dyn_array_end) = .; \ 225 - } 226 213 #define SECURITY_INIT \ 227 214 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ 228 215 VMLINUX_SYMBOL(__security_initcall_start) = .; \
-43
include/linux/init.h
··· 247 247 /* Relies on boot_command_line being set */ 248 248 void __init parse_early_param(void); 249 249 250 - struct dyn_array { 251 - void **name; 252 - unsigned long size; 253 - unsigned int *nr; 254 - unsigned long align; 255 - void (*init_work)(void *); 256 - }; 257 - extern struct dyn_array *__dyn_array_start[], *__dyn_array_end[]; 258 - extern struct dyn_array *__per_cpu_dyn_array_start[], *__per_cpu_dyn_array_end[]; 259 - 260 - #define DEFINE_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \ 261 - static struct dyn_array __dyn_array_##nameX __initdata = \ 262 - { .name = (void **)&(nameX),\ 263 - .size = sizeX,\ 264 - .nr = &(nrX),\ 265 - .align = alignX,\ 266 - .init_work = init_workX,\ 267 - }; \ 268 - static struct dyn_array *__dyn_array_ptr_##nameX __used \ 269 - __attribute__((__section__(".dyn_array.init"))) = \ 270 - &__dyn_array_##nameX 271 - 272 - #define DEFINE_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \ 273 - DEFINE_DYN_ARRAY_ADDR(nameX, nameX, sizeX, nrX, alignX, init_workX) 274 - 275 - #define DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \ 276 - static struct dyn_array __per_cpu_dyn_array_##nameX __initdata = \ 277 - { .name = (void **)&(addrX),\ 278 - .size = sizeX,\ 279 - .nr = &(nrX),\ 280 - .align = alignX,\ 281 - .init_work = init_workX,\ 282 - }; \ 283 - static struct dyn_array *__per_cpu_dyn_array_ptr_##nameX __used \ 284 - __attribute__((__section__(".per_cpu_dyn_array.init"))) = \ 285 - &__per_cpu_dyn_array_##nameX 286 - 287 - #define DEFINE_PER_CPU_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \ 288 - DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, nameX, nrX, alignX, init_workX) 289 - 290 - extern void pre_alloc_dyn_array(void); 291 - extern unsigned long per_cpu_dyn_array_size(unsigned long *align); 292 - extern void per_cpu_alloc_dyn_array(int cpu, char *ptr); 293 250 #endif /* __ASSEMBLY__ */ 294 251 295 252 /**
-15
include/linux/irq.h
··· 139 139 const char *typename; 140 140 }; 141 141 142 - struct timer_rand_state; 143 - struct irq_2_iommu; 144 142 /** 145 143 * struct irq_desc - interrupt descriptor 146 144 * ··· 165 167 */ 166 168 struct irq_desc { 167 169 unsigned int irq; 168 - #ifdef CONFIG_HAVE_DYN_ARRAY 169 - unsigned int *kstat_irqs; 170 - #endif 171 170 irq_flow_handler_t handle_irq; 172 171 struct irq_chip *chip; 173 172 struct msi_desc *msi_desc; ··· 193 198 } ____cacheline_internodealigned_in_smp; 194 199 195 200 196 - #ifndef CONFIG_HAVE_DYN_ARRAY 197 - /* could be removed if we get rid of all irq_desc reference */ 198 201 extern struct irq_desc irq_desc[NR_IRQS]; 199 - #else 200 - extern struct irq_desc *irq_desc; 201 - #endif 202 202 203 203 static inline struct irq_desc *irq_to_desc(unsigned int irq) 204 204 { 205 205 return (irq < nr_irqs) ? irq_desc + irq : NULL; 206 206 } 207 - 208 - #ifdef CONFIG_HAVE_DYN_ARRAY 209 - #define kstat_irqs_this_cpu(DESC) \ 210 - ((DESC)->kstat_irqs[smp_processor_id()]) 211 - #endif 212 207 213 208 /* 214 209 * Migration helpers for obsolete names, they will go away:
+6 -10
include/linux/kernel_stat.h
··· 28 28 29 29 struct kernel_stat { 30 30 struct cpu_usage_stat cpustat; 31 - #ifndef CONFIG_HAVE_DYN_ARRAY 32 31 unsigned int irqs[NR_IRQS]; 33 - #endif 34 32 }; 35 33 36 34 DECLARE_PER_CPU(struct kernel_stat, kstat); ··· 39 41 40 42 extern unsigned long long nr_context_switches(void); 41 43 42 - #ifndef CONFIG_HAVE_DYN_ARRAY 43 - #define kstat_irqs_this_cpu(irq) \ 44 - (kstat_this_cpu.irqs[irq]) 45 - #endif 44 + struct irq_desc; 46 45 46 + static inline void kstat_incr_irqs_this_cpu(unsigned int irq, 47 + struct irq_desc *desc) 48 + { 49 + kstat_this_cpu.irqs[irq]++; 50 + } 47 51 48 - #ifndef CONFIG_HAVE_DYN_ARRAY 49 52 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 50 53 { 51 54 return kstat_cpu(cpu).irqs[irq]; 52 55 } 53 - #else 54 - extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); 55 - #endif 56 56 57 57 /* 58 58 * Number of interrupts per specific IRQ source, since bootup
+1 -1
init/Makefile
··· 2 2 # Makefile for the linux kernel. 3 3 # 4 4 5 - obj-y := main.o dyn_array.o version.o mounts.o 5 + obj-y := main.o version.o mounts.o 6 6 ifneq ($(CONFIG_BLK_DEV_INITRD),y) 7 7 obj-y += noinitramfs.o 8 8 else
-120
init/dyn_array.c
··· 1 - #include <linux/types.h> 2 - #include <linux/kernel.h> 3 - #include <linux/kallsyms.h> 4 - #include <linux/init.h> 5 - #include <linux/bootmem.h> 6 - #include <linux/irq.h> 7 - 8 - void __init pre_alloc_dyn_array(void) 9 - { 10 - #ifdef CONFIG_HAVE_DYN_ARRAY 11 - unsigned long total_size = 0, size, phys; 12 - unsigned long max_align = 1; 13 - struct dyn_array **daa; 14 - char *ptr; 15 - 16 - /* get the total size at first */ 17 - for (daa = __dyn_array_start ; daa < __dyn_array_end; daa++) { 18 - struct dyn_array *da = *daa; 19 - 20 - printk(KERN_DEBUG "dyn_array %pF size:%#lx nr:%d align:%#lx\n", 21 - da->name, da->size, *da->nr, da->align); 22 - size = da->size * (*da->nr); 23 - total_size += roundup(size, da->align); 24 - if (da->align > max_align) 25 - max_align = da->align; 26 - } 27 - if (total_size) 28 - printk(KERN_DEBUG "dyn_array total_size: %#lx\n", 29 - total_size); 30 - else 31 - return; 32 - 33 - /* allocate them all together */ 34 - max_align = max_t(unsigned long, max_align, PAGE_SIZE); 35 - ptr = __alloc_bootmem(total_size, max_align, 0); 36 - phys = virt_to_phys(ptr); 37 - 38 - for (daa = __dyn_array_start ; daa < __dyn_array_end; daa++) { 39 - struct dyn_array *da = *daa; 40 - 41 - size = da->size * (*da->nr); 42 - phys = roundup(phys, da->align); 43 - printk(KERN_DEBUG "dyn_array %pF ==> [%#lx - %#lx]\n", 44 - da->name, phys, phys + size); 45 - *da->name = phys_to_virt(phys); 46 - 47 - phys += size; 48 - 49 - if (da->init_work) 50 - da->init_work(da); 51 - } 52 - #else 53 - #ifdef CONFIG_GENERIC_HARDIRQS 54 - unsigned int i; 55 - 56 - for (i = 0; i < NR_IRQS; i++) 57 - irq_desc[i].irq = i; 58 - #endif 59 - #endif 60 - } 61 - 62 - unsigned long __init per_cpu_dyn_array_size(unsigned long *align) 63 - { 64 - unsigned long total_size = 0; 65 - #ifdef CONFIG_HAVE_DYN_ARRAY 66 - unsigned long size; 67 - struct dyn_array **daa; 68 - unsigned max_align = 1; 69 - 70 - for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) { 
71 - struct dyn_array *da = *daa; 72 - 73 - printk(KERN_DEBUG "per_cpu_dyn_array %pF size:%#lx nr:%d align:%#lx\n", 74 - da->name, da->size, *da->nr, da->align); 75 - size = da->size * (*da->nr); 76 - total_size += roundup(size, da->align); 77 - if (da->align > max_align) 78 - max_align = da->align; 79 - } 80 - if (total_size) { 81 - printk(KERN_DEBUG "per_cpu_dyn_array total_size: %#lx\n", 82 - total_size); 83 - *align = max_align; 84 - } 85 - #endif 86 - return total_size; 87 - } 88 - 89 - #ifdef CONFIG_SMP 90 - void __init per_cpu_alloc_dyn_array(int cpu, char *ptr) 91 - { 92 - #ifdef CONFIG_HAVE_DYN_ARRAY 93 - unsigned long size, phys; 94 - struct dyn_array **daa; 95 - unsigned long addr; 96 - void **array; 97 - 98 - phys = virt_to_phys(ptr); 99 - for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) { 100 - struct dyn_array *da = *daa; 101 - 102 - size = da->size * (*da->nr); 103 - phys = roundup(phys, da->align); 104 - printk(KERN_DEBUG "per_cpu_dyn_array %pF ==> [%#lx - %#lx]\n", 105 - da->name, phys, phys + size); 106 - 107 - addr = (unsigned long)da->name; 108 - addr += per_cpu_offset(cpu); 109 - array = (void **)addr; 110 - *array = phys_to_virt(phys); 111 - *da->name = *array; /* so init_work could use it directly */ 112 - 113 - phys += size; 114 - 115 - if (da->init_work) 116 - da->init_work(da); 117 - } 118 - #endif 119 - } 120 - #endif
+2 -9
init/main.c
··· 391 391 392 392 static void __init setup_per_cpu_areas(void) 393 393 { 394 - unsigned long size, i, old_size; 394 + unsigned long size, i; 395 395 char *ptr; 396 396 unsigned long nr_possible_cpus = num_possible_cpus(); 397 - unsigned long align = 1; 398 - unsigned da_size; 399 397 400 398 /* Copy section for each CPU (we discard the original) */ 401 - old_size = PERCPU_ENOUGH_ROOM; 402 - da_size = per_cpu_dyn_array_size(&align); 403 - align = max_t(unsigned long, PAGE_SIZE, align); 404 - size = ALIGN(old_size + da_size, align); 399 + size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE); 405 400 ptr = alloc_bootmem_pages(size * nr_possible_cpus); 406 401 407 402 for_each_possible_cpu(i) { 408 403 __per_cpu_offset[i] = ptr - __per_cpu_start; 409 404 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 410 - per_cpu_alloc_dyn_array(i, ptr + old_size); 411 405 ptr += size; 412 406 } 413 407 } ··· 567 573 printk(KERN_NOTICE); 568 574 printk(linux_banner); 569 575 setup_arch(&command_line); 570 - pre_alloc_dyn_array(); 571 576 mm_init_owner(&init_mm, &init_task); 572 577 setup_command_line(command_line); 573 578 unwind_setup();
+5 -25
kernel/irq/chip.c
··· 326 326 if (unlikely(desc->status & IRQ_INPROGRESS)) 327 327 goto out_unlock; 328 328 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 329 - #ifdef CONFIG_HAVE_DYN_ARRAY 330 - kstat_irqs_this_cpu(desc)++; 331 - #else 332 - kstat_irqs_this_cpu(irq)++; 333 - #endif 329 + kstat_incr_irqs_this_cpu(irq, desc); 334 330 335 331 action = desc->action; 336 332 if (unlikely(!action || (desc->status & IRQ_DISABLED))) ··· 367 371 if (unlikely(desc->status & IRQ_INPROGRESS)) 368 372 goto out_unlock; 369 373 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 370 - #ifdef CONFIG_HAVE_DYN_ARRAY 371 - kstat_irqs_this_cpu(desc)++; 372 - #else 373 - kstat_irqs_this_cpu(irq)++; 374 - #endif 374 + kstat_incr_irqs_this_cpu(irq, desc); 375 375 376 376 /* 377 377 * If its disabled or no action available ··· 414 422 goto out; 415 423 416 424 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 417 - #ifdef CONFIG_HAVE_DYN_ARRAY 418 - kstat_irqs_this_cpu(desc)++; 419 - #else 420 - kstat_irqs_this_cpu(irq)++; 421 - #endif 425 + kstat_incr_irqs_this_cpu(irq, desc); 422 426 423 427 /* 424 428 * If its disabled or no action available ··· 478 490 mask_ack_irq(desc, irq); 479 491 goto out_unlock; 480 492 } 481 - #ifdef CONFIG_HAVE_DYN_ARRAY 482 - kstat_irqs_this_cpu(desc)++; 483 - #else 484 - kstat_irqs_this_cpu(irq)++; 485 - #endif 493 + kstat_incr_irqs_this_cpu(irq, desc); 486 494 487 495 /* Start handling the irq */ 488 496 desc->chip->ack(irq); ··· 533 549 { 534 550 irqreturn_t action_ret; 535 551 536 - #ifdef CONFIG_HAVE_DYN_ARRAY 537 - kstat_irqs_this_cpu(desc)++; 538 - #else 539 - kstat_irqs_this_cpu(irq)++; 540 - #endif 552 + kstat_incr_irqs_this_cpu(irq, desc); 541 553 542 554 if (desc->chip->ack) 543 555 desc->chip->ack(irq);
+9 -105
kernel/irq/handle.c
··· 18 18 19 19 #include "internals.h" 20 20 21 - /* 22 - * lockdep: we want to handle all irq_desc locks as a single lock-class: 23 - */ 24 - static struct lock_class_key irq_desc_lock_class; 25 - 26 21 /** 27 22 * handle_bad_irq - handle spurious and unhandled irqs 28 23 * @irq: the interrupt number ··· 25 30 * 26 31 * Handles spurious and unhandled IRQ's. It also prints a debugmessage. 27 32 */ 28 - void 29 - handle_bad_irq(unsigned int irq, struct irq_desc *desc) 33 + void handle_bad_irq(unsigned int irq, struct irq_desc *desc) 30 34 { 31 35 print_irq_desc(irq, desc); 32 - #ifdef CONFIG_HAVE_DYN_ARRAY 33 - kstat_irqs_this_cpu(desc)++; 34 - #else 35 - kstat_irqs_this_cpu(irq)++; 36 - #endif 36 + kstat_incr_irqs_this_cpu(irq, desc); 37 37 ack_bad_irq(irq); 38 38 } 39 39 ··· 49 59 int nr_irqs = NR_IRQS; 50 60 EXPORT_SYMBOL_GPL(nr_irqs); 51 61 52 - #ifdef CONFIG_HAVE_DYN_ARRAY 53 - static struct irq_desc irq_desc_init = { 54 - .irq = -1U, 55 - .status = IRQ_DISABLED, 56 - .chip = &no_irq_chip, 57 - .handle_irq = handle_bad_irq, 58 - .depth = 1, 59 - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 60 - #ifdef CONFIG_SMP 61 - .affinity = CPU_MASK_ALL 62 - #endif 63 - }; 64 - 65 - 66 - static void init_one_irq_desc(struct irq_desc *desc) 67 - { 68 - memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); 69 - lockdep_set_class(&desc->lock, &irq_desc_lock_class); 70 - } 71 - 72 - extern int after_bootmem; 73 - extern void *__alloc_bootmem_nopanic(unsigned long size, 74 - unsigned long align, 75 - unsigned long goal); 76 - 77 - static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr) 78 - { 79 - unsigned long bytes, total_bytes; 80 - char *ptr; 81 - int i; 82 - unsigned long phys; 83 - 84 - /* Compute how many bytes we need per irq and allocate them */ 85 - bytes = nr * sizeof(unsigned int); 86 - total_bytes = bytes * nr_desc; 87 - if (after_bootmem) 88 - ptr = kzalloc(total_bytes, GFP_ATOMIC); 89 - else 90 - ptr = __alloc_bootmem_nopanic(total_bytes, 
PAGE_SIZE, 0); 91 - 92 - if (!ptr) 93 - panic(" can not allocate kstat_irqs\n"); 94 - 95 - phys = __pa(ptr); 96 - printk(KERN_DEBUG "kstat_irqs ==> [%#lx - %#lx]\n", phys, phys + total_bytes); 97 - 98 - for (i = 0; i < nr_desc; i++) { 99 - desc[i].kstat_irqs = (unsigned int *)ptr; 100 - ptr += bytes; 101 - } 102 - } 103 - 104 - static void __init init_work(void *data) 105 - { 106 - struct dyn_array *da = data; 107 - int i; 108 - struct irq_desc *desc; 109 - 110 - desc = *da->name; 111 - 112 - for (i = 0; i < *da->nr; i++) { 113 - init_one_irq_desc(&desc[i]); 114 - desc[i].irq = i; 115 - } 116 - 117 - /* init kstat_irqs, nr_cpu_ids is ready already */ 118 - init_kstat_irqs(desc, *da->nr, nr_cpu_ids); 119 - } 120 - 121 - struct irq_desc *irq_desc; 122 - DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work); 123 - 124 - #else 125 - 126 62 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 127 63 [0 ... NR_IRQS-1] = { 128 64 .status = IRQ_DISABLED, ··· 61 145 #endif 62 146 } 63 147 }; 64 - 65 - #endif 66 148 67 149 /* 68 150 * What should we do if we get a hw irq event on an illegal vector? 
··· 172 258 struct irqaction *action; 173 259 unsigned int status; 174 260 175 - #ifdef CONFIG_HAVE_DYN_ARRAY 176 - kstat_irqs_this_cpu(desc)++; 177 - #else 178 - kstat_irqs_this_cpu(irq)++; 179 - #endif 261 + kstat_incr_irqs_this_cpu(irq, desc); 262 + 180 263 if (CHECK_IRQ_PER_CPU(desc->status)) { 181 264 irqreturn_t action_ret; 182 265 ··· 262 351 263 352 264 353 #ifdef CONFIG_TRACE_IRQFLAGS 354 + /* 355 + * lockdep: we want to handle all irq_desc locks as a single lock-class: 356 + */ 357 + static struct lock_class_key irq_desc_lock_class; 358 + 265 359 void early_init_irq_lock_class(void) 266 360 { 267 - #ifndef CONFIG_HAVE_DYN_ARRAY 268 361 int i; 269 362 270 363 for (i = 0; i < nr_irqs; i++) 271 364 lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class); 272 - #endif 273 365 } 274 366 #endif 275 - 276 - #ifdef CONFIG_HAVE_DYN_ARRAY 277 - unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 278 - { 279 - struct irq_desc *desc = irq_to_desc(irq); 280 - return desc->kstat_irqs[cpu]; 281 - } 282 - #endif 283 - EXPORT_SYMBOL(kstat_irqs_cpu); 284 -