Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

at v2.6.37-rc2 · 528 lines · 14 kB
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable */
#define CR_A	(1 << 1)	/* Alignment abort enable */
#define CR_C	(1 << 2)	/* Dcache enable */
#define CR_W	(1 << 3)	/* Write buffer enable */
#define CR_P	(1 << 4)	/* 32-bit exception handler */
#define CR_D	(1 << 5)	/* 32-bit data address range */
#define CR_L	(1 << 6)	/* Implementation defined */
#define CR_B	(1 << 7)	/* Big endian */
#define CR_S	(1 << 8)	/* System MMU protection */
#define CR_R	(1 << 9)	/* ROM MMU protection */
#define CR_F	(1 << 10)	/* Implementation defined */
#define CR_Z	(1 << 11)	/* Implementation defined */
#define CR_I	(1 << 12)	/* Icache enable */
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
#define CR_U	(1 << 22)	/* Unaligned access operation */
#define CR_XP	(1 << 23)	/* Extended page tables */
#define CR_VE	(1 << 24)	/* Vectored interrupts */
#define CR_EE	(1 << 25)	/* Exception (Big) Endian */
#define CR_TRE	(1 << 28)	/* TEX remap enable */
#define CR_AFE	(1 << 29)	/* Access flag enable */
#define CR_TE	(1 << 30)	/* Thumb exception enable */

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()		do { dsb(); outer_sync(); } while (0)
#define rmb()		dmb()
#define wmb()		mb()
#else
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif

#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

void cpu_idle_wait(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifdef CONFIG_CPU_32v6K
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif /* CONFIG_CPU_32v6K */
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifndef CONFIG_CPU_32v6K
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif	/* !CONFIG_CPU_32v6K */
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))

#ifdef CONFIG_CPU_32v6K

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

#else /* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif	/* CONFIG_CPU_32v6K */

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
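Usage note (not part of the file above): the snippet below is a minimal, hypothetical sketch of how generic kernel code might use the xchg() and cmpxchg() macros this header supplies on ARM. The claim_flag variable and the try_claim()/release_claim() helpers are illustrative names only; the macros themselves come from the header, and cmpxchg() is defined here only for __LINUX_ARM_ARCH__ >= 6 (earlier architectures get it from asm-generic/cmpxchg.h when !CONFIG_SMP).

#include <asm/system.h>	/* xchg(), cmpxchg() as defined above */

/* Hypothetical one-shot claim flag: 0 = free, 1 = claimed. */
static unsigned long claim_flag;

/* Returns nonzero only for the caller that claims the flag first.
 * cmpxchg() returns the value previously in memory, so the new value
 * was stored iff the returned old value is 0 ("free"). */
static int try_claim(void)
{
	return cmpxchg(&claim_flag, 0UL, 1UL) == 0UL;
}

/* Unconditionally release the flag and report its previous state.
 * Both macros expand to __xchg()/__cmpxchg_mb(), which place smp_mb()
 * on each side, so they also order surrounding accesses on SMP. */
static unsigned long release_claim(void)
{
	return xchg(&claim_flag, 0UL);
}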