Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.19-rc3 · 334 lines · 8.8 kB
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__


#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
#else
#define read_cpuid(reg) (processor_id)
#endif

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
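/*
 * Editorial note -- an illustrative (hypothetical) use of __asmeq(),
 * not part of the original header.  When an asm body hard-codes a
 * register name, one __asmeq() per operand makes the assembler error
 * out instead of silently corrupting state if the compiler ever
 * allocates a different register.  `some_helper' is a made-up symbol:
 *
 *	register unsigned long r0v asm("r0") = arg;
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"bl	some_helper"
 *		: "+r" (r0v) : : "lr", "cc", "memory");
 */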

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})
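/*
 * Editorial note -- an illustrative sketch, not part of the original
 * header: the usual pattern is a read-modify-write of the control
 * register through the accessors above, e.g. to enable alignment
 * fault checking via the CR_A bit defined earlier in this file:
 *
 *	unsigned int cr = get_cr();
 *	set_cr(cr | CR_A);
 */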
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 6
#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				   : : "r" (0) : "memory")
#else
#define mb() __asm__ __volatile__ ("" : : : "memory")
#endif
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#include <linux/irqflags.h>

#ifdef CONFIG_SMP

#define smp_mb()		mb()
#define smp_rmb()		rmb()
#define smp_wmb()		wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#else

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while(0)

#endif /* CONFIG_SMP */

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
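/*
 * Editorial note -- an illustrative sketch of the xchg() wrapper built
 * on __xchg() above, not part of the original header (`lock' is a
 * hypothetical word-sized variable).  Depending on the build, this
 * becomes the ldrex/strex loop (ARMv6), the IRQ-disabled emulation
 * (buggy-swp StrongARM) or a single swp instruction:
 *
 *	static volatile unsigned long lock;
 *	...
 *	unsigned long old = xchg(&lock, 1);
 *	if (old == 0) {
 *		... old value was 0, so we now hold the lock ...
 *	}
 */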
extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif