Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.21-rc2 (364 lines, 9.6 kB)
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)				\
		    :						\
		    : "cc");					\
		__val;						\
	})
#else
#define read_cpuid(reg) (processor_id)
#endif

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
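
/*
 * Editor's illustrative sketch, not part of the original header: __asmeq()
 * is pasted into an inline asm template.  At assembly time the assembler
 * compares the register the compiler actually allocated for an operand
 * against the register the code requires, and ".err" aborts the build on
 * a mismatch.  The helper symbol and function name below are hypothetical.
 */
#ifndef __ASSEMBLY__
static inline int __example_call_in_r0(int arg)
{
	register int r0 asm("r0") = arg;	/* force the argument into r0 */

	asm volatile(
		__asmeq("%0", "r0")		/* build fails unless %0 is r0 */
		"bl	__example_helper"	/* hypothetical assembly routine */
		: "+r" (r0) : : "lr", "cc", "memory");
	return r0;
}
#endif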

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale()	0
#else
#define cpu_is_xscale()	1
#endif

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
#define mb() dmb()
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}
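
/*
 * Editor's illustrative sketch, not part of the original header: get_cr()
 * and set_cr() are normally combined into a read-modify-write of the CP15
 * control register, for example to switch on alignment fault checking.
 * On non-SMP kernels adjust_cr(), declared below, wraps the same kind of
 * update behind a (mask, set) interface.  The function name here is
 * hypothetical.
 */
static inline void __example_enable_alignment_faults(void)
{
	set_cr(get_cr() | CR_A);	/* set the A bit, leave the rest alone */
}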

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
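
/*
 * Editor's illustrative sketch, not part of the original header: the
 * CPACC_* values are OR-ed into the CP15 coprocessor access register to
 * grant or revoke access per coprocessor.  Granting full access to
 * cp10/cp11, as below, is the usual prerequisite for using VFP.  The
 * function name is hypothetical.
 */
static inline void __example_grant_vfp_access(void)
{
	/* allow both user and kernel mode to use coprocessors 10 and 11 */
	set_copro_access(get_copro_access() |
			 CPACC_FULL(10) | CPACC_FULL(11));
}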

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_SMP

#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#else

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while(0)

#endif /* CONFIG_SMP */

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
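
Editor's note: the xchg() macro defined near the top of this header expands to __xchg() above, which selects ldrex/strex on ARMv6, an interrupts-disabled emulation where swp is buggy (StrongARM), or plain swp/swpb otherwise. As a rough, hedged illustration of how xchg()/tas() behave (all names below are invented; real kernel code uses spinlock_t, not this):

/* Editor's sketch: a crude test-and-set "lock" built on tas()/xchg(). */
static unsigned long example_lock;		/* hypothetical lock word */

static inline void example_acquire(void)
{
	while (tas(&example_lock))		/* atomically store 1, read the old value */
		;				/* old value non-zero: someone else holds it, spin */
	smp_mb();				/* order the critical section after the acquire */
}

static inline void example_release(void)
{
	smp_mb();				/* order the critical section before the release */
	example_lock = 0;
}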