/* Linux v2.6.21 */
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <asm/memory.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable */
#define CR_A	(1 << 1)	/* Alignment abort enable */
#define CR_C	(1 << 2)	/* Dcache enable */
#define CR_W	(1 << 3)	/* Write buffer enable */
#define CR_P	(1 << 4)	/* 32-bit exception handler */
#define CR_D	(1 << 5)	/* 32-bit data address range */
#define CR_L	(1 << 6)	/* Implementation defined */
#define CR_B	(1 << 7)	/* Big endian */
#define CR_S	(1 << 8)	/* System MMU protection */
#define CR_R	(1 << 9)	/* ROM MMU protection */
#define CR_F	(1 << 10)	/* Implementation defined */
#define CR_Z	(1 << 11)	/* Implementation defined */
#define CR_I	(1 << 12)	/* Icache enable */
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
#define CR_U	(1 << 22)	/* Unaligned access operation */
#define CR_XP	(1 << 23)	/* Extended page tables */
#define CR_VE	(1 << 24)	/* Vectored interrupts */

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)				\
		    :						\
		    : "cc");					\
		__val;						\
	})
#else
#define read_cpuid(reg) (processor_id)
#endif

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
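/*
 * Illustrative use of __asmeq() (not part of the original header; the
 * helper name __do_thing is hypothetical).  A caller that binds an
 * argument to a fixed register pastes __asmeq() into the asm string, so
 * assembly aborts via .err if the compiler ever allocates a different
 * register for "%0" than the one the code assumes:
 *
 *	register unsigned long __r0 asm("r0") = arg;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "bl	__do_thing"
 *		     : "+r" (__r0) : : "lr", "cc");
 */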

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
					struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale()	0
#else
#define cpu_is_xscale()	1
#endif

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ >= 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
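/*
 * Illustrative pairing of the SMP barriers above (not part of the
 * original header); the classic publish/consume pattern described in
 * Documentation/memory-barriers.txt:
 *
 *	CPU 0				CPU 1
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					... a read of data now sees 42 ...
 */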
#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)
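/*
 * Illustrative call site (not part of the original header): the generic
 * scheduler's context_switch() in kernel/sched.c invokes the macro as
 *
 *	switch_to(prev, next, prev);
 *
 * so that `prev' ends up holding the task that was actually running
 * before the switch, as seen from the newly resumed stack.
 */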

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
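
/*
 * Illustrative usage of the xchg() wrapper declared above (not part of
 * the original header); __xchg() handles 1- and 4-byte operands, e.g. to
 * atomically fetch and clear a word of pending flags:
 *
 *	static unsigned long pending;
 *	unsigned long old = xchg(&pending, 0UL);
 */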