[PATCH] x86_64: Node local pda take 2 -- cpu_pda preparation

Helper patch to change cpu_pda users to use macros to access cpu_pda
instead of the cpu_pda[] array.

Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ravikiran G Thirumalai and committed by Linus Torvalds
(commit df79efde, parent 05b3cbd8).

Total: +21 -20 (21 insertions, 20 deletions)
+2 -2
arch/x86_64/kernel/irq.c
··· 70 70 seq_printf(p, "NMI: "); 71 71 for (j = 0; j < NR_CPUS; j++) 72 72 if (cpu_online(j)) 73 - seq_printf(p, "%10u ", cpu_pda[j].__nmi_count); 73 + seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); 74 74 seq_putc(p, '\n'); 75 75 #ifdef CONFIG_X86_LOCAL_APIC 76 76 seq_printf(p, "LOC: "); 77 77 for (j = 0; j < NR_CPUS; j++) 78 78 if (cpu_online(j)) 79 - seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs); 79 + seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); 80 80 seq_putc(p, '\n'); 81 81 #endif 82 82 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+3 -3
arch/x86_64/kernel/nmi.c
··· 155 155 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); 156 156 157 157 for (cpu = 0; cpu < NR_CPUS; cpu++) 158 - counts[cpu] = cpu_pda[cpu].__nmi_count; 158 + counts[cpu] = cpu_pda(cpu)->__nmi_count; 159 159 local_irq_enable(); 160 160 mdelay((10*1000)/nmi_hz); // wait 10 ticks 161 161 162 162 for (cpu = 0; cpu < NR_CPUS; cpu++) { 163 163 if (!cpu_online(cpu)) 164 164 continue; 165 - if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) { 165 + if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { 166 166 endflag = 1; 167 167 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", 168 168 cpu, 169 169 counts[cpu], 170 - cpu_pda[cpu].__nmi_count); 170 + cpu_pda(cpu)->__nmi_count); 171 171 nmi_active = 0; 172 172 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG; 173 173 nmi_perfctr_msr = 0;
+4 -4
arch/x86_64/kernel/setup64.c
··· 30 30 31 31 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; 32 32 33 - struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned; 33 + struct x8664_pda _cpu_pda[NR_CPUS] __cacheline_aligned; 34 34 35 35 struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table }; 36 36 ··· 110 110 } 111 111 if (!ptr) 112 112 panic("Cannot allocate cpu data for CPU %d\n", i); 113 - cpu_pda[i].data_offset = ptr - __per_cpu_start; 113 + cpu_pda(i)->data_offset = ptr - __per_cpu_start; 114 114 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 115 115 } 116 116 } 117 117 118 118 void pda_init(int cpu) 119 119 { 120 - struct x8664_pda *pda = &cpu_pda[cpu]; 120 + struct x8664_pda *pda = cpu_pda(cpu); 121 121 122 122 /* Setup up data that may be needed in __get_free_pages early */ 123 123 asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 124 - wrmsrl(MSR_GS_BASE, cpu_pda + cpu); 124 + wrmsrl(MSR_GS_BASE, pda); 125 125 126 126 pda->cpunumber = cpu; 127 127 pda->irqcount = -1;
+1 -1
arch/x86_64/kernel/smpboot.c
··· 792 792 793 793 do_rest: 794 794 795 - cpu_pda[cpu].pcurrent = c_idle.idle; 795 + cpu_pda(cpu)->pcurrent = c_idle.idle; 796 796 797 797 start_rip = setup_trampoline(); 798 798
+5 -6
arch/x86_64/kernel/traps.c
··· 70 70 asmlinkage void alignment_check(void); 71 71 asmlinkage void machine_check(void); 72 72 asmlinkage void spurious_interrupt_bug(void); 73 - asmlinkage void call_debug(void); 74 73 75 74 struct notifier_block *die_chain; 76 75 static DEFINE_SPINLOCK(die_notifier_lock); ··· 138 139 switch (k + 1) { 139 140 #if DEBUG_STKSZ > EXCEPTION_STKSZ 140 141 case DEBUG_STACK: 141 - end = cpu_pda[cpu].debugstack + DEBUG_STKSZ; 142 + end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ; 142 143 break; 143 144 #endif 144 145 default: ··· 185 186 { 186 187 unsigned long addr; 187 188 const unsigned cpu = safe_smp_processor_id(); 188 - unsigned long *irqstack_end = (unsigned long *)cpu_pda[cpu].irqstackptr; 189 + unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr; 189 190 int i; 190 191 unsigned used = 0; 191 192 ··· 253 254 unsigned long *stack; 254 255 int i; 255 256 const int cpu = safe_smp_processor_id(); 256 - unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr); 257 - unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE); 257 + unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); 258 + unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); 258 259 259 260 // debugging aid: "show_stack(NULL, NULL);" prints the 260 261 // back trace for this cpu. ··· 302 303 int in_kernel = !user_mode(regs); 303 304 unsigned long rsp; 304 305 const int cpu = safe_smp_processor_id(); 305 - struct task_struct *cur = cpu_pda[cpu].pcurrent; 306 + struct task_struct *cur = cpu_pda(cpu)->pcurrent; 306 307 307 308 rsp = regs->rsp; 308 309
+1 -1
arch/x86_64/kernel/x8664_ksyms.c
··· 96 96 EXPORT_SYMBOL(copy_page); 97 97 EXPORT_SYMBOL(clear_page); 98 98 99 - EXPORT_SYMBOL(cpu_pda); 99 + EXPORT_SYMBOL(_cpu_pda); 100 100 #ifdef CONFIG_SMP 101 101 EXPORT_SYMBOL(cpu_data); 102 102 EXPORT_SYMBOL(__write_lock_failed);
+1 -1
arch/x86_64/mm/numa.c
··· 272 272 273 273 void __cpuinit numa_set_node(int cpu, int node) 274 274 { 275 - cpu_pda[cpu].nodenumber = node; 275 + cpu_pda(cpu)->nodenumber = node; 276 276 cpu_to_node[cpu] = node; 277 277 } 278 278
+3 -1
include/asm-x86_64/pda.h
··· 27 27 unsigned apic_timer_irqs; 28 28 } ____cacheline_aligned_in_smp; 29 29 30 - extern struct x8664_pda cpu_pda[]; 30 + extern struct x8664_pda _cpu_pda[]; 31 + 32 + #define cpu_pda(i) (&_cpu_pda[i]) 31 33 32 34 /* 33 35 * There is no fast way to get the base address of the PDA, all the accesses
+1 -1
include/asm-x86_64/percpu.h
··· 11 11 12 12 #include <asm/pda.h> 13 13 14 - #define __per_cpu_offset(cpu) (cpu_pda[cpu].data_offset) 14 + #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) 15 15 #define __my_cpu_offset() read_pda(data_offset) 16 16 17 17 /* Separate out the type, so (int[3], foo) works. */