/* as of Linux v2.6.26-rc4 */
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <asm/memory.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
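/*
 * Illustrative sketch only (hypothetical code, not part of this header):
 * a caller that must pass its argument in r0 can pin the value to r0 with
 * a register variable and let __asmeq() verify that the compiler really
 * picked r0 for operand %0.  If it did not, .ifnc/.err makes the assembler
 * stop with an error instead of silently miscompiling the sequence.
 *
 *	register unsigned long r0_arg asm("r0") = arg;
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"swi	#0		@ hypothetical call taking r0"
 *		: "+r" (r0_arg)
 *		:
 *		: "memory");
 */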
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/irqflags.h>

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg)		\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#endif

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or calling read_cpuid() directly.
 */
static inline unsigned int read_cpuid_id(void) __attribute_const__;

static inline unsigned int read_cpuid_id(void)
{
	return read_cpuid(CPUID_ID);
}

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale()	0
#else
#define cpu_is_xscale()	1
#endif

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
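/*
 * Illustrative sketch only (hypothetical names, not part of this header):
 * dmb() orders memory accesses as seen by other observers, e.g. it can make
 * a data write visible before the flag write that publishes it:
 *
 *	buf->data = val;
 *	dmb();			<-- data reaches observers before the flag
 *	buf->ready = 1;
 *
 * A reader would poll buf->ready, issue dmb(), then read buf->data.
 * Generic code should normally use the mb()/smp_mb() family defined below
 * rather than calling dmb() directly.
 */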
#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
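/*
 * Illustrative sketch only (not part of this header): granting full access
 * to a coprocessor pair, e.g. CP10/CP11 as used by VFP, is a read-modify-
 * write of the coprocessor access register:
 *
 *	unsigned int access = get_copro_access();
 *	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
 *
 * set_copro_access() already issues isb(), so the new permissions take
 * effect before any subsequent coprocessor instructions.
 */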
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts during the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
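/*
 * Illustrative sketch only (hypothetical names, not part of this header):
 * xchg() atomically stores a new value and returns the old one, which is
 * enough to build a simple test-and-set lock:
 *
 *	static volatile unsigned long my_lock;
 *
 *	while (xchg(&my_lock, 1) != 0)
 *		;			<-- spin until we observe it free
 *	...critical section...
 *	my_lock = 0;			<-- hypothetical release; real code
 *					    should use the spinlock API
 */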