#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another, so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
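
/*
 * Illustrative example (not part of the original header): __asmeq() is
 * pasted in front of an inline asm template to verify that a variable
 * bound to a specific register really ended up there.  The variable and
 * system-call use below are made up for the sketch:
 *
 *	register unsigned long r0 asm("r0") = some_argument;
 *
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"svc	#0"
 *		: "+r" (r0) : : "memory");
 *
 * If the compiler placed operand 0 somewhere other than r0, the generated
 * ".ifnc" directive fails and assembly stops instead of silently using
 * the wrong register.
 */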

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>

#define __exception	__attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry	__irq_entry
#else
#define __exception_irq_entry	__exception
#endif

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
					struct pt_regs *),
		     int sig, int code, const char *name);

void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
					struct pt_regs *),
		      int sig, int code, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7 || \
	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define rmb()		dsb()
#define wmb()		mb()
#else
#include <asm/memory.h>
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif

#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
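
/*
 * Illustrative example (not part of the original header): the smp_*()
 * barriers above pair up in the usual producer/consumer pattern.
 * `data' and `flag' are hypothetical shared variables for this sketch.
 *
 * Writer:
 *	data = value;
 *	smp_wmb();		// order the data store before the flag store
 *	flag = 1;
 *
 * Reader:
 *	while (!flag)
 *		cpu_relax();
 *	smp_rmb();		// order the flag load before the data load
 *	use(data);
 *
 * On !CONFIG_SMP builds these collapse to barrier(); on SMP builds they
 * emit dmb() (or its CP15 equivalent) as selected by the #ifdefs above.
 */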

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
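
/*
 * Illustrative example (not part of the original header): a typical use
 * of the accessors above is granting full access to the VFP coprocessors
 * (CP10/CP11) before touching any VFP state, roughly:
 *
 *	u32 access = get_copro_access();
 *
 *	if ((access & CPACC_FULL(10)) != CPACC_FULL(10))
 *		set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
 *
 * set_copro_access() already issues the isb() needed before the new
 * permissions are guaranteed to apply to subsequent instructions.
 */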

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}
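
/*
 * Illustrative example (not part of the original header): xchg() is
 * typically used for simple test-and-set style constructions.  `busy'
 * is a hypothetical unsigned int flag used only for this sketch:
 *
 *	while (xchg(&busy, 1) != 0)
 *		cpu_relax();		// spin until we observe 0
 *	... critical section ...
 *	smp_mb();
 *	busy = 0;			// release
 *
 * __xchg() above already brackets the exchange with smp_mb(), so the
 * acquire side needs no additional barrier here.
 */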

extern void disable_hlt(void);
extern void enable_hlt(void);

void cpu_idle_wait(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))
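
/*
 * Illustrative example (not part of the original header): cmpxchg() is
 * the usual building block for lock-free read-modify-write loops.
 * `counter' is a hypothetical unsigned int shared variable:
 *
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * Remember the restriction noted above: with a plain ARMv6 core only the
 * 4-byte case is compiled in, so sizeof(*(ptr)) must be 4 there.
 */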

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))

#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

#else /* min ARCH = ARMv6 */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif