/* arch/arm/include/asm/system.h, as of Linux v3.3-rc2 */
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>
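/*
 * Illustrative sketch (hypothetical helper, not kernel API): the usual way
 * __asmeq() is consumed.  It is pasted in front of an inline asm template
 * that assumes a specific register binding; if the compiler did not really
 * allocate that register, the generated .ifnc/.err sequence makes the
 * assembler reject the build instead of silently emitting wrong code.
 */
static inline void __example_asmeq_use(unsigned long val)
{
	register unsigned long reg0 asm("r0") = val;

	asm volatile(
		__asmeq("%0", "r0")	/* build fails unless %0 really is r0 */
		"mov	%0, %0"
		: "+r" (reg0));
}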
struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

#ifdef CONFIG_ARM_LPAE
#define FAULT_CODE_ALIGNMENT	33
#define FAULT_CODE_DEBUG	34
#else
#define FAULT_CODE_ALIGNMENT	1
#define FAULT_CODE_DEBUG	2
#endif

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int __pure cpu_architecture(void);
extern void cpu_init(void);

void soft_restart(unsigned long);
extern void (*arm_pm_restart)(char str, const char *cmd);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7 ||		\
	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
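/*
 * Illustrative sketch (hypothetical example, not kernel API): the classic
 * message-passing pattern the primitives above exist for.  The writer makes
 * the payload globally visible before the flag, so a reader that observes
 * the flag (with a matching read barrier on its side) also observes the
 * payload.  The mb()/smp_*() wrappers below build on the same primitives.
 */
static inline void __example_publish(int *payload, int *flag)
{
	*payload = 42;	/* store the data first */
	dmb();		/* order the payload store before the flag store */
	*flag = 1;	/* then announce that it is ready */
}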
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define rmb()		dsb()
#define wmb()		mb()
#else
#include <asm/memory.h>
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif

#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
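/*
 * Illustrative sketch (hypothetical helper, not kernel API): the usual
 * read-modify-write of the coprocessor access register using the helpers
 * above, granting full user/kernel access to coprocessors 10 and 11 (the
 * VFP pair).  Example only; the real enable path lives in the VFP code.
 */
static inline void __example_enable_vfp_access(void)
{
	unsigned int access = get_copro_access();

	/* set both fields to "full access"; set_copro_access() issues isb() */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}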
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}
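/*
 * Illustrative sketch (hypothetical helpers, not kernel API): a minimal
 * test-and-set spin built on xchg(), relying on the smp_mb() calls inside
 * __xchg() for ordering.  Example only; real code uses the proper spinlock
 * and atomic APIs rather than open-coding this.
 */
static inline void __example_busy_lock(unsigned int *lock)
{
	while (xchg(lock, 1) != 0)	/* old value tells us whether we won */
		barrier();
}

static inline void __example_busy_unlock(unsigned int *lock)
{
	xchg(lock, 0);			/* release; barriers are in __xchg() */
}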
extern void disable_hlt(void);
extern void enable_hlt(void);

void cpu_idle_wait(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)				  	       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))
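/*
 * Illustrative sketch (hypothetical helper, not kernel API): the canonical
 * compare-and-swap retry loop built on cmpxchg() above.  It adds "a" to *v
 * unless *v equals "u", and returns the value seen before the update.
 * Example only; real code would use atomic_add_unless() and friends.
 */
static inline unsigned int __example_add_unless(unsigned int *v,
						unsigned int a, unsigned int u)
{
	unsigned int old, new;

	do {
		old = *v;
		if (old == u)			/* forbidden value: leave it alone */
			break;
		new = old + a;
	} while (cmpxchg(v, old, new) != old);	/* retry if another CPU raced us */

	return old;
}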
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

#else /* min ARCH = ARMv6 */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif