[PATCH] sched: add cacheflush() asm

Add a per-arch sched_cacheflush(), a write-back cache flush used by the
migration-cost calibration code at bootup time.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar and committed by Linus Torvalds (commit 4dc7a0bb, parent c6b44d10).
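To make the intended use concrete, here is a minimal sketch of the kind of calibration loop this primitive serves: flush the caches, then time how long it takes to repopulate a working set. This is illustrative only; the actual migration-cost measurement lands in kernel/sched.c as a separate patch, and the helpers touch_cache() and measure_cost() below are hypothetical names, not the scheduler's real code.

	/*
	 * Illustrative sketch only -- not the scheduler's actual
	 * calibration code. touch_cache() and measure_cost() are
	 * hypothetical helpers; sched_clock() is the kernel's
	 * nanosecond-resolution clock.
	 */
	static void touch_cache(void *cache, unsigned long size)
	{
		unsigned long i, *p = cache;

		/* Walk the buffer to pull 'size' bytes back into the caches: */
		for (i = 0; i < size / sizeof(unsigned long); i++)
			p[i]++;
	}

	static unsigned long long measure_cost(void *cache, unsigned long size)
	{
		unsigned long long t0, t1;

		/* Start cold, so the cost of refilling the caches is included: */
		sched_cacheflush();

		t0 = sched_clock();
		touch_cache(cache, size);
		t1 = sched_clock();

		return t1 - t0;
	}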

---
 arch/ia64/kernel/setup.c     | 10 ++++++++++
 include/asm-alpha/system.h   | 10 ++++++++++
 include/asm-arm/system.h     | 10 ++++++++++
 include/asm-arm26/system.h   | 10 ++++++++++
 include/asm-i386/system.h    |  9 +++++++++
 include/asm-ia64/system.h    |  1 +
 include/asm-m32r/system.h    | 10 ++++++++++
 include/asm-mips/system.h    | 10 ++++++++++
 include/asm-parisc/system.h  |  9 +++++++++
 include/asm-powerpc/system.h | 10 ++++++++++
 include/asm-ppc/system.h     | 10 ++++++++++
 include/asm-s390/system.h    | 10 ++++++++++
 include/asm-sh/system.h      | 10 ++++++++++
 include/asm-sparc/system.h   | 10 ++++++++++
 include/asm-sparc64/system.h | 10 ++++++++++
 include/asm-x86_64/system.h  |  9 +++++++++
 16 files changed, 148 insertions(+)

diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -60,6 +60,7 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
+#include <asm/system.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
@@ -868,6 +869,15 @@
 	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
 	platform_cpu_init();
 	pm_idle = default_idle;
+}
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+	ia64_sal_cache_flush(3);
 }
 
 void
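On ia64 the flush goes through firmware rather than a single instruction: ia64_sal_cache_flush() wraps the SAL_CACHE_FLUSH call, and its argument selects which caches to flush; per the IA-64 SAL specification, 3 requests a flush of both the instruction and data caches.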
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -140,6 +140,16 @@
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -172,6 +172,16 @@
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * CPU interrupt mask handling.
  */
 #if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -115,6 +115,16 @@
 } while (0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Save the current interrupt enable state & disable IRQs
  */
 #define local_irq_save(x) \
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -548,6 +548,15 @@
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
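On x86 the whole job is a single privileged instruction: wbinvd() emits WBINVD, which writes back all modified lines in the CPU's internal caches and invalidates them, signalling external caches to flush themselves as well. The x86-64 hunk at the end of this patch uses the same instruction.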
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -279,6 +279,7 @@
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
+void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
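Note that ia64 is the one architecture with an out-of-line implementation (in arch/ia64/kernel/setup.c above); presumably the SAL call machinery is not usable from asm/system.h, so the header carries only the prototype.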
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -68,6 +68,16 @@
 	last = __last; \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -164,6 +164,16 @@
 	__restore_dsp(current); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@
 	(last) = _switch_to(prev, next); \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 
 /* interrupt control */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -175,6 +175,16 @@
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done;	/* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -123,6 +123,16 @@
 			  struct task_struct *);
 #define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,6 +104,16 @@
 	prev = __switch_to(prev,next); \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -57,6 +57,16 @@
 	last = __last; \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define nop() __asm__ __volatile__ ("nop")
 
 
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -166,6 +166,16 @@
 } while(0)
 
 /*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
  * Changing the IRQ level on the Sparc.
  */
 extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -253,6 +253,16 @@
 	} \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -193,6 +193,15 @@
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 #endif /* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")