Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: move fpu_counter into ARCH specific thread_struct

Only a couple of arches (sh/x86) use fpu_counter in task_struct so it can
be moved out into ARCH specific thread_struct, reducing the size of
task_struct for other arches.

Compile-tested with the sh defconfig using sh4-linux-gcc (4.6.3).

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Paul Mundt <paul.mundt@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Vineet Gupta and committed by Linus Torvalds
616c05d1 261adc9a

+25 -5
+1 -1
arch/sh/include/asm/fpu.h
··· 46 46 save_fpu(tsk); 47 47 release_fpu(regs); 48 48 } else 49 - tsk->fpu_counter = 0; 49 + tsk->thread.fpu_counter = 0; 50 50 } 51 51 52 52 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
+10
arch/sh/include/asm/processor_32.h
··· 111 111 112 112 /* Extended processor state */ 113 113 union thread_xstate *xstate; 114 + 115 + /* 116 + * fpu_counter contains the number of consecutive context switches 117 + * that the FPU is used. If this is over a threshold, the lazy fpu 118 + * saving becomes unlazy to save the trap. This is an unsigned char 119 + * so that after 256 times the counter wraps and the behavior turns 120 + * lazy again; this to deal with bursty apps that only use FPU for 121 + * a short time 122 + */ 123 + unsigned char fpu_counter; 114 124 }; 115 125 116 126 #define INIT_THREAD { \
+10
arch/sh/include/asm/processor_64.h
··· 126 126 127 127 /* floating point info */ 128 128 union thread_xstate *xstate; 129 + 130 + /* 131 + * fpu_counter contains the number of consecutive context switches 132 + * that the FPU is used. If this is over a threshold, the lazy fpu 133 + * saving becomes unlazy to save the trap. This is an unsigned char 134 + * so that after 256 times the counter wraps and the behavior turns 135 + * lazy again; this to deal with bursty apps that only use FPU for 136 + * a short time 137 + */ 138 + unsigned char fpu_counter; 129 139 }; 130 140 131 141 #define INIT_MMAP \
+1 -1
arch/sh/kernel/cpu/fpu.c
··· 44 44 restore_fpu(tsk); 45 45 46 46 task_thread_info(tsk)->status |= TS_USEDFPU; 47 - tsk->fpu_counter++; 47 + tsk->thread.fpu_counter++; 48 48 } 49 49 50 50 void fpu_state_restore(struct pt_regs *regs)
+3 -3
arch/sh/kernel/process_32.c
··· 156 156 #endif 157 157 ti->addr_limit = KERNEL_DS; 158 158 ti->status &= ~TS_USEDFPU; 159 - p->fpu_counter = 0; 159 + p->thread.fpu_counter = 0; 160 160 return 0; 161 161 } 162 162 *childregs = *current_pt_regs(); ··· 189 189 unlazy_fpu(prev, task_pt_regs(prev)); 190 190 191 191 /* we're going to use this soon, after a few expensive things */ 192 - if (next->fpu_counter > 5) 192 + if (next->thread.fpu_counter > 5) 193 193 prefetch(next_t->xstate); 194 194 195 195 #ifdef CONFIG_MMU ··· 207 207 * restore of the math state immediately to avoid the trap; the 208 208 * chances of needing FPU soon are obviously high now 209 209 */ 210 - if (next->fpu_counter > 5) 210 + if (next->thread.fpu_counter > 5) 211 211 __fpu_state_restore(); 212 212 213 213 return prev;