Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.17-rc6
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
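
/*
 * Illustrative example (not part of the original header): a minimal sketch
 * of how read_cpuid() and the CPUID_* register indices above can be used.
 * The helper name read_cpuid_implementer() is hypothetical; the block is
 * guarded out so the header itself is unchanged.
 */
#if 0
static inline unsigned int read_cpuid_implementer(void)
{
	/* Bits [31:24] of the main ID register hold the implementer code
	 * (0x41 is ARM Ltd, 0x69 is Intel). */
	return (read_cpuid(CPUID_ID) >> 24) & 0xff;
}
#endif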

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define	cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 6
#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				   : : "r" (0) : "memory")
#else
#define mb() __asm__ __volatile__ ("" : : : "memory")
#endif
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
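
/*
 * Illustrative example (not part of the original header): where the wmb()
 * and rmb() barriers defined above would sit in a simple publish/consume
 * pattern.  The example_* variables are hypothetical, and the block is
 * guarded out so the header itself is unchanged.
 */
#if 0
extern int example_data;
extern int example_ready;

static inline void example_publish(int v)
{
	example_data = v;
	wmb();			/* data must be visible before the flag */
	example_ready = 1;
}

static inline int example_consume(void)
{
	if (!example_ready)
		return -1;	/* hypothetical "not ready yet" value */
	rmb();			/* flag must be read before the data */
	return example_data;
}
#endif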

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
	"cpsid	i"						\
	: "=r" (x) : : "memory", "cc");				\
	})

#define local_irq_enable()  __asm__("cpsie i	@ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i	@ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f	@ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f	@ __clf" : : : "memory", "cc")

#else

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
		(void) (&temp == &x);				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Enable FIQs
 */
#define local_fiq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

/*
 * Disable FIQs
 */
#define local_fiq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory", "cc");					\
	})

#endif

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags"	\
	: "=r" (x) : : "memory", "cc");				\
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory", "cc")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(int)(flags & PSR_I_BIT);	\
})
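
/*
 * Illustrative example (not part of the original header): the usual pairing
 * of local_irq_save() and local_irq_restore() defined above.  Note that
 * local_irq_save() takes the flags variable itself, not its address.  The
 * example_* names are hypothetical and the block is guarded out.
 */
#if 0
extern unsigned long example_event_count;

static inline void example_count_event(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask IRQs, keeping the old CPSR */
	example_event_count++;
	local_irq_restore(flags);	/* put the previous mask back */
}
#endif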

#ifdef CONFIG_SMP

#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#else

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while(0)

#endif /* CONFIG_SMP */

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *   1. Disable interrupts and emulate the atomic swap
 *   2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
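
/*
 * Illustrative example (not part of the original header, kept outside the
 * include guard): how the xchg() wrapper declared earlier in this file is
 * typically used.  xchg() stores the new value and returns the old one
 * atomically, routing through __xchg() above (ldrex/strex on ARMv6, an
 * IRQ-masked emulation when swp is buggy on StrongARM, or swp/swpb
 * otherwise).  The example_lock name is hypothetical and the block is
 * guarded out.
 */
#if 0
static unsigned long example_lock;

static inline int example_trylock(void)
{
	/* non-zero only for the caller that changed 0 -> 1 */
	return xchg(&example_lock, 1) == 0;
}
#endif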