/* include/asm-mips/system.h — Linux v2.6.20-rc4, 348 lines, 8.6 kB */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>


/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 *
 * resume() is the low-level assembly context switch: it saves "last",
 * switches to "next" using next's thread_info, and returns the task we
 * actually switched away from (needed by the scheduler's "last" argument).
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

/*
 * FPU-affinity-aware variant of switch_to():
 *  - drops the MF_FPUBOUND restriction if prev never touched the FPU
 *    during its slice (Status.CU1 clear in the saved kernel status),
 *  - saves/restores the DSP ASE state around the low-level resume(),
 *  - clears next's software FP-emulation counter before it runs.
 *
 * NOTE(review): this variant passes next->thread_info to resume() while
 * the !FPAFF variant below uses task_thread_info(next) — presumably
 * equivalent on this kernel, but verify; later kernels unified these.
 */
#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_fpu &&						\
	    (prev->thread.mflags & MF_FPUBOUND) &&			\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		prev->thread.mflags &= ~MF_FPUBOUND;			\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	next->thread.emulated_fp = 0;					\
	(last) = resume(prev, next, next->thread_info);			\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)

#else
/*
 * Plain context switch: save prev's DSP state, perform the low-level
 * resume(), then reload DSP state for whoever is now "current".
 */
#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while(0)
#endif

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/*
 * Atomically exchange *m with val and return the old value.
 *
 * Three strategies, chosen at runtime/build time:
 *  1. ll/sc with the branch-likely retry (beqzl) — required workaround
 *     when R10000_LLSC_WAR is set (see asm/war.h);
 *  2. plain ll/sc retry loop for other LL/SC-capable CPUs;
 *  3. fallback for CPUs without ll/sc: disable local interrupts around a
 *     plain read-modify-write (UP-safe only, which is all such CPUs are).
 *
 * ".set mips3" temporarily raises the assembler ISA so ll/sc are accepted
 * even when the kernel is built for an older ISA; it is dropped back to
 * mips0 around the move so that instruction is checked against the build
 * ISA.  The trailing smp_mb() gives the full-barrier semantics callers of
 * xchg() expect.
 */
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

#ifdef CONFIG_64BIT
/*
 * 64-bit exchange; same three-way structure as __xchg_u32 but using the
 * doubleword lld/scd pair (64-bit kernels only).
 */
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}
#else
/* On 32-bit kernels a 64-bit xchg must fail at link time, not silently. */
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

/*
 * Size dispatcher for xchg(); any size other than 4 or 8 is rejected at
 * link time via the deliberately-undefined function above.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Atomic compare-and-exchange: if *m == old, store new; always return the
 * value *m held before the operation.  Same three implementation paths as
 * __xchg_u32.  ".set noat" is needed because the sequence uses $1 (the
 * assembler temporary) as scratch for the new value.
 */
static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}

#ifdef CONFIG_64BIT
/*
 * 64-bit compare-and-exchange via lld/scd (64-bit kernels only).
 *
 * NOTE(review): the pointer parameter is declared "volatile int *" even
 * though a 64-bit quantity is accessed — callers reach here through
 * __cmpxchg()'s void pointer, so this compiles, but verify before reuse.
 */
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 1b					\n"
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_mb();

	return retval;
}
#else
/* On 32-bit kernels a 64-bit cmpxchg must fail at link time. */
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

/*
 * Size dispatcher for cmpxchg(); sizes other than 4 or 8 are rejected at
 * link time via the deliberately-undefined function above.
 */
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

/* Exception/interrupt vector installation helpers (defined in traps.c). */
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

/* No stack randomization on MIPS: return the stack pointer unchanged. */
#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */