Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] x86-64: Some cleanup and optimization to the processor data area.

- Remove unused irqrsp field
- Remove pda->me
- Optimize set_softirq_pending slightly

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Andi Kleen and committed by Linus Torvalds.
3f74478b 459192c9

+18 -14
-1
arch/x86_64/kernel/asm-offsets.c
··· 39 39 ENTRY(kernelstack); 40 40 ENTRY(oldrsp); 41 41 ENTRY(pcurrent); 42 - ENTRY(irqrsp); 43 42 ENTRY(irqcount); 44 43 ENTRY(cpunumber); 45 44 ENTRY(irqstackptr);
-1
arch/x86_64/kernel/setup64.c
··· 119 119 asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 120 120 wrmsrl(MSR_GS_BASE, cpu_pda + cpu); 121 121 122 - pda->me = pda; 123 122 pda->cpunumber = cpu; 124 123 pda->irqcount = -1; 125 124 pda->kernelstack =
+5 -4
include/asm-x86_64/hardirq.h
··· 9 9 10 10 #define __ARCH_IRQ_STAT 1 11 11 12 - /* Generate a lvalue for a pda member. Should fix softirq.c instead to use 13 - special access macros. This would generate better code. */ 14 - #define __IRQ_STAT(cpu,member) (read_pda(me)->member) 12 + #define local_softirq_pending() read_pda(__softirq_pending) 15 13 16 - #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ 14 + #define __ARCH_SET_SOFTIRQ_PENDING 1 15 + 16 + #define set_softirq_pending(x) write_pda(__softirq_pending, (x)) 17 + #define or_softirq_pending(x) or_pda(__softirq_pending, (x)) 17 18 18 19 /* 19 20 * 'what should we do if we get a hw irq event on an illegal vector'.
+6 -6
include/asm-x86_64/pda.h
··· 10 10 struct x8664_pda { 11 11 struct task_struct *pcurrent; /* Current process */ 12 12 unsigned long data_offset; /* Per cpu data offset from linker address */ 13 - struct x8664_pda *me; /* Pointer to itself */ 14 13 unsigned long kernelstack; /* top of kernel stack for current */ 15 14 unsigned long oldrsp; /* user rsp for system call */ 16 - unsigned long irqrsp; /* Old rsp for interrupts. */ 17 15 int irqcount; /* Irq nesting counter. Starts with -1 */ 18 16 int cpunumber; /* Logical CPU number */ 19 17 char *irqstackptr; /* top of irqstack */ ··· 40 42 #define pda_offset(field) offsetof(struct x8664_pda, field) 41 43 42 44 #define pda_to_op(op,field,val) do { \ 45 + typedef typeof_field(struct x8664_pda, field) T__; \ 43 46 switch (sizeof_field(struct x8664_pda, field)) { \ 44 47 case 2: \ 45 - asm volatile(op "w %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \ 48 + asm volatile(op "w %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ 46 49 case 4: \ 47 - asm volatile(op "l %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \ 50 + asm volatile(op "l %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ 48 51 case 8: \ 49 - asm volatile(op "q %0,%%gs:%P1"::"r" (val),"i"(pda_offset(field)):"memory"); break; \ 52 + asm volatile(op "q %0,%%gs:%P1"::"ri" ((T__)val),"i"(pda_offset(field)):"memory"); break; \ 50 53 default: __bad_pda_field(); \ 51 54 } \ 52 55 } while (0) ··· 57 58 * Unfortunately removing them causes all hell to break lose currently. 
58 59 */ 59 60 #define pda_from_op(op,field) ({ \ 60 - typedef typeof_field(struct x8664_pda, field) T__; T__ ret__; \ 61 + typeof_field(struct x8664_pda, field) ret__; \ 61 62 switch (sizeof_field(struct x8664_pda, field)) { \ 62 63 case 2: \ 63 64 asm volatile(op "w %%gs:%P1,%0":"=r" (ret__):"i"(pda_offset(field)):"memory"); break;\ ··· 74 75 #define write_pda(field,val) pda_to_op("mov",field,val) 75 76 #define add_pda(field,val) pda_to_op("add",field,val) 76 77 #define sub_pda(field,val) pda_to_op("sub",field,val) 78 + #define or_pda(field,val) pda_to_op("or",field,val) 77 79 78 80 #endif 79 81
+6 -1
include/linux/interrupt.h
··· 57 57 extern void enable_irq(unsigned int irq); 58 58 #endif 59 59 60 + #ifndef __ARCH_SET_SOFTIRQ_PENDING 61 + #define set_softirq_pending(x) (local_softirq_pending() = (x)) 62 + #define or_softirq_pending(x) (local_softirq_pending() |= (x)) 63 + #endif 64 + 60 65 /* 61 66 * Temporary defines for UP kernels, until all code gets fixed. 62 67 */ ··· 128 123 asmlinkage void do_softirq(void); 129 124 extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); 130 125 extern void softirq_init(void); 131 - #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0) 126 + #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) 132 127 extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); 133 128 extern void FASTCALL(raise_softirq(unsigned int nr)); 134 129
+1 -1
kernel/softirq.c
··· 84 84 cpu = smp_processor_id(); 85 85 restart: 86 86 /* Reset the pending bitmask before enabling irqs */ 87 - local_softirq_pending() = 0; 87 + set_softirq_pending(0); 88 88 89 89 local_irq_enable(); 90 90