/* include/asm-mips/system.h — Linux v2.6.23-rc8 (477 lines, 12 kB) */
1/* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle 7 * Copyright (C) 1996 by Paul M. Antoine 8 * Copyright (C) 1999 Silicon Graphics 9 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com 10 * Copyright (C) 2000 MIPS Technologies, Inc. 11 */ 12#ifndef _ASM_SYSTEM_H 13#define _ASM_SYSTEM_H 14 15#include <linux/types.h> 16#include <linux/irqflags.h> 17 18#include <asm/addrspace.h> 19#include <asm/barrier.h> 20#include <asm/cpu-features.h> 21#include <asm/dsp.h> 22#include <asm/war.h> 23 24 25/* 26 * switch_to(n) should switch tasks to task nr n, first 27 * checking that n isn't the current task, in which case it does nothing. 28 */ 29extern asmlinkage void *resume(void *last, void *next, void *next_ti); 30 31struct task_struct; 32 33#ifdef CONFIG_MIPS_MT_FPAFF 34 35/* 36 * Handle the scheduler resume end of FPU affinity management. We do this 37 * inline to try to keep the overhead down. If we have been forced to run on 38 * a "CPU" with an FPU because of a previous high level of FP computation, 39 * but did not actually use the FPU during the most recent time-slice (CU1 40 * isn't set), we undo the restriction on cpus_allowed. 41 * 42 * We're not calling set_cpus_allowed() here, because we have no need to 43 * force prompt migration - we're already switching the current CPU to a 44 * different thread. 
45 */ 46 47#define __mips_mt_fpaff_switch_to(prev) \ 48do { \ 49 struct thread_info *__prev_ti = task_thread_info(prev); \ 50 \ 51 if (cpu_has_fpu && \ 52 test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \ 53 (!(KSTK_STATUS(prev) & ST0_CU1))) { \ 54 clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \ 55 prev->cpus_allowed = prev->thread.user_cpus_allowed; \ 56 } \ 57 next->thread.emulated_fp = 0; \ 58} while(0) 59 60#else 61#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0) 62#endif 63 64#define switch_to(prev,next,last) \ 65do { \ 66 __mips_mt_fpaff_switch_to(prev); \ 67 if (cpu_has_dsp) \ 68 __save_dsp(prev); \ 69 (last) = resume(prev, next, task_thread_info(next)); \ 70 if (cpu_has_dsp) \ 71 __restore_dsp(current); \ 72 if (cpu_has_userlocal) \ 73 write_c0_userlocal(task_thread_info(current)->tp_value);\ 74} while(0) 75 76static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) 77{ 78 __u32 retval; 79 80 if (cpu_has_llsc && R10000_LLSC_WAR) { 81 unsigned long dummy; 82 83 __asm__ __volatile__( 84 " .set mips3 \n" 85 "1: ll %0, %3 # xchg_u32 \n" 86 " .set mips0 \n" 87 " move %2, %z4 \n" 88 " .set mips3 \n" 89 " sc %2, %1 \n" 90 " beqzl %2, 1b \n" 91 " .set mips0 \n" 92 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 93 : "R" (*m), "Jr" (val) 94 : "memory"); 95 } else if (cpu_has_llsc) { 96 unsigned long dummy; 97 98 __asm__ __volatile__( 99 " .set mips3 \n" 100 "1: ll %0, %3 # xchg_u32 \n" 101 " .set mips0 \n" 102 " move %2, %z4 \n" 103 " .set mips3 \n" 104 " sc %2, %1 \n" 105 " beqz %2, 2f \n" 106 " .subsection 2 \n" 107 "2: b 1b \n" 108 " .previous \n" 109 " .set mips0 \n" 110 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 111 : "R" (*m), "Jr" (val) 112 : "memory"); 113 } else { 114 unsigned long flags; 115 116 raw_local_irq_save(flags); 117 retval = *m; 118 *m = val; 119 raw_local_irq_restore(flags); /* implies memory barrier */ 120 } 121 122 smp_llsc_mb(); 123 124 return retval; 125} 126 127#ifdef CONFIG_64BIT 128static inline __u64 
__xchg_u64(volatile __u64 * m, __u64 val) 129{ 130 __u64 retval; 131 132 if (cpu_has_llsc && R10000_LLSC_WAR) { 133 unsigned long dummy; 134 135 __asm__ __volatile__( 136 " .set mips3 \n" 137 "1: lld %0, %3 # xchg_u64 \n" 138 " move %2, %z4 \n" 139 " scd %2, %1 \n" 140 " beqzl %2, 1b \n" 141 " .set mips0 \n" 142 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 143 : "R" (*m), "Jr" (val) 144 : "memory"); 145 } else if (cpu_has_llsc) { 146 unsigned long dummy; 147 148 __asm__ __volatile__( 149 " .set mips3 \n" 150 "1: lld %0, %3 # xchg_u64 \n" 151 " move %2, %z4 \n" 152 " scd %2, %1 \n" 153 " beqz %2, 2f \n" 154 " .subsection 2 \n" 155 "2: b 1b \n" 156 " .previous \n" 157 " .set mips0 \n" 158 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 159 : "R" (*m), "Jr" (val) 160 : "memory"); 161 } else { 162 unsigned long flags; 163 164 raw_local_irq_save(flags); 165 retval = *m; 166 *m = val; 167 raw_local_irq_restore(flags); /* implies memory barrier */ 168 } 169 170 smp_llsc_mb(); 171 172 return retval; 173} 174#else 175extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val); 176#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels 177#endif 178 179/* This function doesn't exist, so you'll get a linker error 180 if something tries to do an invalid xchg(). 
*/ 181extern void __xchg_called_with_bad_pointer(void); 182 183static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) 184{ 185 switch (size) { 186 case 4: 187 return __xchg_u32(ptr, x); 188 case 8: 189 return __xchg_u64(ptr, x); 190 } 191 __xchg_called_with_bad_pointer(); 192 return x; 193} 194 195#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 196 197#define __HAVE_ARCH_CMPXCHG 1 198 199static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, 200 unsigned long new) 201{ 202 __u32 retval; 203 204 if (cpu_has_llsc && R10000_LLSC_WAR) { 205 __asm__ __volatile__( 206 " .set push \n" 207 " .set noat \n" 208 " .set mips3 \n" 209 "1: ll %0, %2 # __cmpxchg_u32 \n" 210 " bne %0, %z3, 2f \n" 211 " .set mips0 \n" 212 " move $1, %z4 \n" 213 " .set mips3 \n" 214 " sc $1, %1 \n" 215 " beqzl $1, 1b \n" 216 "2: \n" 217 " .set pop \n" 218 : "=&r" (retval), "=R" (*m) 219 : "R" (*m), "Jr" (old), "Jr" (new) 220 : "memory"); 221 } else if (cpu_has_llsc) { 222 __asm__ __volatile__( 223 " .set push \n" 224 " .set noat \n" 225 " .set mips3 \n" 226 "1: ll %0, %2 # __cmpxchg_u32 \n" 227 " bne %0, %z3, 2f \n" 228 " .set mips0 \n" 229 " move $1, %z4 \n" 230 " .set mips3 \n" 231 " sc $1, %1 \n" 232 " beqz $1, 3f \n" 233 "2: \n" 234 " .subsection 2 \n" 235 "3: b 1b \n" 236 " .previous \n" 237 " .set pop \n" 238 : "=&r" (retval), "=R" (*m) 239 : "R" (*m), "Jr" (old), "Jr" (new) 240 : "memory"); 241 } else { 242 unsigned long flags; 243 244 raw_local_irq_save(flags); 245 retval = *m; 246 if (retval == old) 247 *m = new; 248 raw_local_irq_restore(flags); /* implies memory barrier */ 249 } 250 251 smp_llsc_mb(); 252 253 return retval; 254} 255 256static inline unsigned long __cmpxchg_u32_local(volatile int * m, 257 unsigned long old, unsigned long new) 258{ 259 __u32 retval; 260 261 if (cpu_has_llsc && R10000_LLSC_WAR) { 262 __asm__ __volatile__( 263 " .set push \n" 264 " .set noat \n" 265 " .set mips3 \n" 
266 "1: ll %0, %2 # __cmpxchg_u32 \n" 267 " bne %0, %z3, 2f \n" 268 " .set mips0 \n" 269 " move $1, %z4 \n" 270 " .set mips3 \n" 271 " sc $1, %1 \n" 272 " beqzl $1, 1b \n" 273 "2: \n" 274 " .set pop \n" 275 : "=&r" (retval), "=R" (*m) 276 : "R" (*m), "Jr" (old), "Jr" (new) 277 : "memory"); 278 } else if (cpu_has_llsc) { 279 __asm__ __volatile__( 280 " .set push \n" 281 " .set noat \n" 282 " .set mips3 \n" 283 "1: ll %0, %2 # __cmpxchg_u32 \n" 284 " bne %0, %z3, 2f \n" 285 " .set mips0 \n" 286 " move $1, %z4 \n" 287 " .set mips3 \n" 288 " sc $1, %1 \n" 289 " beqz $1, 1b \n" 290 "2: \n" 291 " .set pop \n" 292 : "=&r" (retval), "=R" (*m) 293 : "R" (*m), "Jr" (old), "Jr" (new) 294 : "memory"); 295 } else { 296 unsigned long flags; 297 298 local_irq_save(flags); 299 retval = *m; 300 if (retval == old) 301 *m = new; 302 local_irq_restore(flags); /* implies memory barrier */ 303 } 304 305 return retval; 306} 307 308#ifdef CONFIG_64BIT 309static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, 310 unsigned long new) 311{ 312 __u64 retval; 313 314 if (cpu_has_llsc && R10000_LLSC_WAR) { 315 __asm__ __volatile__( 316 " .set push \n" 317 " .set noat \n" 318 " .set mips3 \n" 319 "1: lld %0, %2 # __cmpxchg_u64 \n" 320 " bne %0, %z3, 2f \n" 321 " move $1, %z4 \n" 322 " scd $1, %1 \n" 323 " beqzl $1, 1b \n" 324 "2: \n" 325 " .set pop \n" 326 : "=&r" (retval), "=R" (*m) 327 : "R" (*m), "Jr" (old), "Jr" (new) 328 : "memory"); 329 } else if (cpu_has_llsc) { 330 __asm__ __volatile__( 331 " .set push \n" 332 " .set noat \n" 333 " .set mips3 \n" 334 "1: lld %0, %2 # __cmpxchg_u64 \n" 335 " bne %0, %z3, 2f \n" 336 " move $1, %z4 \n" 337 " scd $1, %1 \n" 338 " beqz $1, 3f \n" 339 "2: \n" 340 " .subsection 2 \n" 341 "3: b 1b \n" 342 " .previous \n" 343 " .set pop \n" 344 : "=&r" (retval), "=R" (*m) 345 : "R" (*m), "Jr" (old), "Jr" (new) 346 : "memory"); 347 } else { 348 unsigned long flags; 349 350 raw_local_irq_save(flags); 351 retval = *m; 352 if (retval == old) 
353 *m = new; 354 raw_local_irq_restore(flags); /* implies memory barrier */ 355 } 356 357 smp_llsc_mb(); 358 359 return retval; 360} 361 362static inline unsigned long __cmpxchg_u64_local(volatile int * m, 363 unsigned long old, unsigned long new) 364{ 365 __u64 retval; 366 367 if (cpu_has_llsc && R10000_LLSC_WAR) { 368 __asm__ __volatile__( 369 " .set push \n" 370 " .set noat \n" 371 " .set mips3 \n" 372 "1: lld %0, %2 # __cmpxchg_u64 \n" 373 " bne %0, %z3, 2f \n" 374 " move $1, %z4 \n" 375 " scd $1, %1 \n" 376 " beqzl $1, 1b \n" 377 "2: \n" 378 " .set pop \n" 379 : "=&r" (retval), "=R" (*m) 380 : "R" (*m), "Jr" (old), "Jr" (new) 381 : "memory"); 382 } else if (cpu_has_llsc) { 383 __asm__ __volatile__( 384 " .set push \n" 385 " .set noat \n" 386 " .set mips3 \n" 387 "1: lld %0, %2 # __cmpxchg_u64 \n" 388 " bne %0, %z3, 2f \n" 389 " move $1, %z4 \n" 390 " scd $1, %1 \n" 391 " beqz $1, 1b \n" 392 "2: \n" 393 " .set pop \n" 394 : "=&r" (retval), "=R" (*m) 395 : "R" (*m), "Jr" (old), "Jr" (new) 396 : "memory"); 397 } else { 398 unsigned long flags; 399 400 local_irq_save(flags); 401 retval = *m; 402 if (retval == old) 403 *m = new; 404 local_irq_restore(flags); /* implies memory barrier */ 405 } 406 407 return retval; 408} 409 410#else 411extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels( 412 volatile int * m, unsigned long old, unsigned long new); 413#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels 414extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels( 415 volatile int * m, unsigned long old, unsigned long new); 416#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels 417#endif 418 419/* This function doesn't exist, so you'll get a linker error 420 if something tries to do an invalid cmpxchg(). 
*/ 421extern void __cmpxchg_called_with_bad_pointer(void); 422 423static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, 424 unsigned long new, int size) 425{ 426 switch (size) { 427 case 4: 428 return __cmpxchg_u32(ptr, old, new); 429 case 8: 430 return __cmpxchg_u64(ptr, old, new); 431 } 432 __cmpxchg_called_with_bad_pointer(); 433 return old; 434} 435 436static inline unsigned long __cmpxchg_local(volatile void * ptr, 437 unsigned long old, unsigned long new, int size) 438{ 439 switch (size) { 440 case 4: 441 return __cmpxchg_u32_local(ptr, old, new); 442 case 8: 443 return __cmpxchg_u64_local(ptr, old, new); 444 } 445 __cmpxchg_called_with_bad_pointer(); 446 return old; 447} 448 449#define cmpxchg(ptr,old,new) \ 450 ((__typeof__(*(ptr)))__cmpxchg((ptr), \ 451 (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr)))) 452 453#define cmpxchg_local(ptr,old,new) \ 454 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ 455 (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr)))) 456 457extern void set_handler (unsigned long offset, void *addr, unsigned long len); 458extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); 459 460typedef void (*vi_handler_t)(void); 461extern void *set_vi_handler (int n, vi_handler_t addr); 462 463extern void *set_except_vector(int n, void *addr); 464extern unsigned long ebase; 465extern void per_cpu_trap_init(void); 466 467extern int stop_a_enabled; 468 469/* 470 * See include/asm-ia64/system.h; prevents deadlock on SMP 471 * systems. 472 */ 473#define __ARCH_WANT_UNLOCKED_CTXSW 474 475extern unsigned long arch_align_stack(unsigned long sp); 476 477#endif /* _ASM_SYSTEM_H */