/* include/asm-mips/system.h — Linux v2.6.18-rc2 (493 lines, 12 kB) */
1/* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle 7 * Copyright (C) 1996 by Paul M. Antoine 8 * Copyright (C) 1999 Silicon Graphics 9 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com 10 * Copyright (C) 2000 MIPS Technologies, Inc. 11 */ 12#ifndef _ASM_SYSTEM_H 13#define _ASM_SYSTEM_H 14 15#include <linux/types.h> 16#include <linux/irqflags.h> 17 18#include <asm/addrspace.h> 19#include <asm/cpu-features.h> 20#include <asm/dsp.h> 21#include <asm/ptrace.h> 22#include <asm/war.h> 23 24/* 25 * read_barrier_depends - Flush all pending reads that subsequents reads 26 * depend on. 27 * 28 * No data-dependent reads from memory-like regions are ever reordered 29 * over this barrier. All reads preceding this primitive are guaranteed 30 * to access memory (but not necessarily other CPUs' caches) before any 31 * reads following this primitive that depend on the data return by 32 * any of the preceding reads. This primitive is much lighter weight than 33 * rmb() on most CPUs, and is never heavier weight than is 34 * rmb(). 35 * 36 * These ordering constraints are respected by both the local CPU 37 * and the compiler. 38 * 39 * Ordering is not guaranteed by anything other than these primitives, 40 * not even by data dependencies. See the documentation for 41 * memory_barrier() for examples and URLs to more information. 42 * 43 * For example, the following code would force ordering (the initial 44 * value of "a" is zero, "b" is one, and "p" is "&a"): 45 * 46 * <programlisting> 47 * CPU 0 CPU 1 48 * 49 * b = 2; 50 * memory_barrier(); 51 * p = &b; q = p; 52 * read_barrier_depends(); 53 * d = *q; 54 * </programlisting> 55 * 56 * because the read of "*q" depends on the read of "p" and these 57 * two reads are separated by a read_barrier_depends(). 
However, 58 * the following code, with the same initial values for "a" and "b": 59 * 60 * <programlisting> 61 * CPU 0 CPU 1 62 * 63 * a = 2; 64 * memory_barrier(); 65 * b = 3; y = b; 66 * read_barrier_depends(); 67 * x = a; 68 * </programlisting> 69 * 70 * does not enforce ordering, since there is no data dependency between 71 * the read of "a" and the read of "b". Therefore, on some CPUs, such 72 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 73 * in cases like this where there are no data dependencies. 74 */ 75 76#define read_barrier_depends() do { } while(0) 77 78#ifdef CONFIG_CPU_HAS_SYNC 79#define __sync() \ 80 __asm__ __volatile__( \ 81 ".set push\n\t" \ 82 ".set noreorder\n\t" \ 83 ".set mips2\n\t" \ 84 "sync\n\t" \ 85 ".set pop" \ 86 : /* no output */ \ 87 : /* no input */ \ 88 : "memory") 89#else 90#define __sync() do { } while(0) 91#endif 92 93#define __fast_iob() \ 94 __asm__ __volatile__( \ 95 ".set push\n\t" \ 96 ".set noreorder\n\t" \ 97 "lw $0,%0\n\t" \ 98 "nop\n\t" \ 99 ".set pop" \ 100 : /* no output */ \ 101 : "m" (*(int *)CKSEG1) \ 102 : "memory") 103 104#define fast_wmb() __sync() 105#define fast_rmb() __sync() 106#define fast_mb() __sync() 107#define fast_iob() \ 108 do { \ 109 __sync(); \ 110 __fast_iob(); \ 111 } while (0) 112 113#ifdef CONFIG_CPU_HAS_WB 114 115#include <asm/wbflush.h> 116 117#define wmb() fast_wmb() 118#define rmb() fast_rmb() 119#define mb() wbflush() 120#define iob() wbflush() 121 122#else /* !CONFIG_CPU_HAS_WB */ 123 124#define wmb() fast_wmb() 125#define rmb() fast_rmb() 126#define mb() fast_mb() 127#define iob() fast_iob() 128 129#endif /* !CONFIG_CPU_HAS_WB */ 130 131#ifdef CONFIG_SMP 132#define smp_mb() mb() 133#define smp_rmb() rmb() 134#define smp_wmb() wmb() 135#define smp_read_barrier_depends() read_barrier_depends() 136#else 137#define smp_mb() barrier() 138#define smp_rmb() barrier() 139#define smp_wmb() barrier() 140#define smp_read_barrier_depends() do { } while(0) 141#endif 142 143#define 
set_mb(var, value) \ 144do { var = value; mb(); } while (0) 145 146/* 147 * switch_to(n) should switch tasks to task nr n, first 148 * checking that n isn't the current task, in which case it does nothing. 149 */ 150extern asmlinkage void *resume(void *last, void *next, void *next_ti); 151 152struct task_struct; 153 154#ifdef CONFIG_MIPS_MT_FPAFF 155 156/* 157 * Handle the scheduler resume end of FPU affinity management. We do this 158 * inline to try to keep the overhead down. If we have been forced to run on 159 * a "CPU" with an FPU because of a previous high level of FP computation, 160 * but did not actually use the FPU during the most recent time-slice (CU1 161 * isn't set), we undo the restriction on cpus_allowed. 162 * 163 * We're not calling set_cpus_allowed() here, because we have no need to 164 * force prompt migration - we're already switching the current CPU to a 165 * different thread. 166 */ 167 168#define switch_to(prev,next,last) \ 169do { \ 170 if (cpu_has_fpu && \ 171 (prev->thread.mflags & MF_FPUBOUND) && \ 172 (!(KSTK_STATUS(prev) & ST0_CU1))) { \ 173 prev->thread.mflags &= ~MF_FPUBOUND; \ 174 prev->cpus_allowed = prev->thread.user_cpus_allowed; \ 175 } \ 176 if (cpu_has_dsp) \ 177 __save_dsp(prev); \ 178 next->thread.emulated_fp = 0; \ 179 (last) = resume(prev, next, next->thread_info); \ 180 if (cpu_has_dsp) \ 181 __restore_dsp(current); \ 182} while(0) 183 184#else 185#define switch_to(prev,next,last) \ 186do { \ 187 if (cpu_has_dsp) \ 188 __save_dsp(prev); \ 189 (last) = resume(prev, next, task_thread_info(next)); \ 190 if (cpu_has_dsp) \ 191 __restore_dsp(current); \ 192} while(0) 193#endif 194 195/* 196 * On SMP systems, when the scheduler does migration-cost autodetection, 197 * it needs a way to flush as much of the CPU's caches as possible. 198 * 199 * TODO: fill this in! 
200 */ 201static inline void sched_cacheflush(void) 202{ 203} 204 205static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) 206{ 207 __u32 retval; 208 209 if (cpu_has_llsc && R10000_LLSC_WAR) { 210 unsigned long dummy; 211 212 __asm__ __volatile__( 213 " .set mips3 \n" 214 "1: ll %0, %3 # xchg_u32 \n" 215 " .set mips0 \n" 216 " move %2, %z4 \n" 217 " .set mips3 \n" 218 " sc %2, %1 \n" 219 " beqzl %2, 1b \n" 220#ifdef CONFIG_SMP 221 " sync \n" 222#endif 223 " .set mips0 \n" 224 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 225 : "R" (*m), "Jr" (val) 226 : "memory"); 227 } else if (cpu_has_llsc) { 228 unsigned long dummy; 229 230 __asm__ __volatile__( 231 " .set mips3 \n" 232 "1: ll %0, %3 # xchg_u32 \n" 233 " .set mips0 \n" 234 " move %2, %z4 \n" 235 " .set mips3 \n" 236 " sc %2, %1 \n" 237 " beqz %2, 1b \n" 238#ifdef CONFIG_SMP 239 " sync \n" 240#endif 241 " .set mips0 \n" 242 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 243 : "R" (*m), "Jr" (val) 244 : "memory"); 245 } else { 246 unsigned long flags; 247 248 local_irq_save(flags); 249 retval = *m; 250 *m = val; 251 local_irq_restore(flags); /* implies memory barrier */ 252 } 253 254 return retval; 255} 256 257#ifdef CONFIG_64BIT 258static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) 259{ 260 __u64 retval; 261 262 if (cpu_has_llsc && R10000_LLSC_WAR) { 263 unsigned long dummy; 264 265 __asm__ __volatile__( 266 " .set mips3 \n" 267 "1: lld %0, %3 # xchg_u64 \n" 268 " move %2, %z4 \n" 269 " scd %2, %1 \n" 270 " beqzl %2, 1b \n" 271#ifdef CONFIG_SMP 272 " sync \n" 273#endif 274 " .set mips0 \n" 275 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 276 : "R" (*m), "Jr" (val) 277 : "memory"); 278 } else if (cpu_has_llsc) { 279 unsigned long dummy; 280 281 __asm__ __volatile__( 282 " .set mips3 \n" 283 "1: lld %0, %3 # xchg_u64 \n" 284 " move %2, %z4 \n" 285 " scd %2, %1 \n" 286 " beqz %2, 1b \n" 287#ifdef CONFIG_SMP 288 " sync \n" 289#endif 290 " .set mips0 \n" 291 : "=&r" (retval), "=m" (*m), "=&r" 
(dummy) 292 : "R" (*m), "Jr" (val) 293 : "memory"); 294 } else { 295 unsigned long flags; 296 297 local_irq_save(flags); 298 retval = *m; 299 *m = val; 300 local_irq_restore(flags); /* implies memory barrier */ 301 } 302 303 return retval; 304} 305#else 306extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val); 307#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels 308#endif 309 310/* This function doesn't exist, so you'll get a linker error 311 if something tries to do an invalid xchg(). */ 312extern void __xchg_called_with_bad_pointer(void); 313 314static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) 315{ 316 switch (size) { 317 case 4: 318 return __xchg_u32(ptr, x); 319 case 8: 320 return __xchg_u64(ptr, x); 321 } 322 __xchg_called_with_bad_pointer(); 323 return x; 324} 325 326#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 327#define tas(ptr) (xchg((ptr),1)) 328 329#define __HAVE_ARCH_CMPXCHG 1 330 331static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, 332 unsigned long new) 333{ 334 __u32 retval; 335 336 if (cpu_has_llsc && R10000_LLSC_WAR) { 337 __asm__ __volatile__( 338 " .set push \n" 339 " .set noat \n" 340 " .set mips3 \n" 341 "1: ll %0, %2 # __cmpxchg_u32 \n" 342 " bne %0, %z3, 2f \n" 343 " .set mips0 \n" 344 " move $1, %z4 \n" 345 " .set mips3 \n" 346 " sc $1, %1 \n" 347 " beqzl $1, 1b \n" 348#ifdef CONFIG_SMP 349 " sync \n" 350#endif 351 "2: \n" 352 " .set pop \n" 353 : "=&r" (retval), "=R" (*m) 354 : "R" (*m), "Jr" (old), "Jr" (new) 355 : "memory"); 356 } else if (cpu_has_llsc) { 357 __asm__ __volatile__( 358 " .set push \n" 359 " .set noat \n" 360 " .set mips3 \n" 361 "1: ll %0, %2 # __cmpxchg_u32 \n" 362 " bne %0, %z3, 2f \n" 363 " .set mips0 \n" 364 " move $1, %z4 \n" 365 " .set mips3 \n" 366 " sc $1, %1 \n" 367 " beqz $1, 1b \n" 368#ifdef CONFIG_SMP 369 " sync \n" 370#endif 371 "2: \n" 372 " .set pop \n" 373 : 
"=&r" (retval), "=R" (*m) 374 : "R" (*m), "Jr" (old), "Jr" (new) 375 : "memory"); 376 } else { 377 unsigned long flags; 378 379 local_irq_save(flags); 380 retval = *m; 381 if (retval == old) 382 *m = new; 383 local_irq_restore(flags); /* implies memory barrier */ 384 } 385 386 return retval; 387} 388 389#ifdef CONFIG_64BIT 390static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, 391 unsigned long new) 392{ 393 __u64 retval; 394 395 if (cpu_has_llsc) { 396 __asm__ __volatile__( 397 " .set push \n" 398 " .set noat \n" 399 " .set mips3 \n" 400 "1: lld %0, %2 # __cmpxchg_u64 \n" 401 " bne %0, %z3, 2f \n" 402 " move $1, %z4 \n" 403 " scd $1, %1 \n" 404 " beqzl $1, 1b \n" 405#ifdef CONFIG_SMP 406 " sync \n" 407#endif 408 "2: \n" 409 " .set pop \n" 410 : "=&r" (retval), "=R" (*m) 411 : "R" (*m), "Jr" (old), "Jr" (new) 412 : "memory"); 413 } else if (cpu_has_llsc) { 414 __asm__ __volatile__( 415 " .set push \n" 416 " .set noat \n" 417 " .set mips3 \n" 418 "1: lld %0, %2 # __cmpxchg_u64 \n" 419 " bne %0, %z3, 2f \n" 420 " move $1, %z4 \n" 421 " scd $1, %1 \n" 422 " beqz $1, 1b \n" 423#ifdef CONFIG_SMP 424 " sync \n" 425#endif 426 "2: \n" 427 " .set pop \n" 428 : "=&r" (retval), "=R" (*m) 429 : "R" (*m), "Jr" (old), "Jr" (new) 430 : "memory"); 431 } else { 432 unsigned long flags; 433 434 local_irq_save(flags); 435 retval = *m; 436 if (retval == old) 437 *m = new; 438 local_irq_restore(flags); /* implies memory barrier */ 439 } 440 441 return retval; 442} 443#else 444extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels( 445 volatile int * m, unsigned long old, unsigned long new); 446#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels 447#endif 448 449/* This function doesn't exist, so you'll get a linker error 450 if something tries to do an invalid cmpxchg(). 
*/ 451extern void __cmpxchg_called_with_bad_pointer(void); 452 453static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old, 454 unsigned long new, int size) 455{ 456 switch (size) { 457 case 4: 458 return __cmpxchg_u32(ptr, old, new); 459 case 8: 460 return __cmpxchg_u64(ptr, old, new); 461 } 462 __cmpxchg_called_with_bad_pointer(); 463 return old; 464} 465 466#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr)))) 467 468extern void set_handler (unsigned long offset, void *addr, unsigned long len); 469extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len); 470extern void *set_vi_handler (int n, void *addr); 471extern void *set_except_vector(int n, void *addr); 472extern unsigned long ebase; 473extern void per_cpu_trap_init(void); 474 475extern NORET_TYPE void die(const char *, struct pt_regs *); 476 477static inline void die_if_kernel(const char *str, struct pt_regs *regs) 478{ 479 if (unlikely(!user_mode(regs))) 480 die(str, regs); 481} 482 483extern int stop_a_enabled; 484 485/* 486 * See include/asm-ia64/system.h; prevents deadlock on SMP 487 * systems. 488 */ 489#define __ARCH_WANT_UNLOCKED_CTXSW 490 491#define arch_align_stack(x) (x) 492 493#endif /* _ASM_SYSTEM_H */