#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch. (From ARM32 - may come in handy)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

#if 0
/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

FIXME - sort this
/*
 * We need to turn the caches off before calling the reset vector - RiscOS
 * messes up if we don't
 */
#define proc_hard_reset()	cpu_proc_fin()

#endif

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
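/*
 * Illustrative sketch only -- not part of the original header.  It shows
 * one way a caller might use the tas()/xchg() helpers defined above to
 * build a crude busy-wait lock on this UP architecture; example_lock and
 * example_unlock are made-up names used purely for illustration.  Kept
 * inside "#if 0" (the convention already used earlier in this file) so it
 * is never compiled.
 */
#if 0
static inline void example_lock(volatile unsigned int *lock)
{
	/*
	 * tas() atomically stores 1 and returns the previous value, so a
	 * zero return means the lock was free and is now held by us.
	 */
	while (tas(lock) != 0)
		continue;	/* spin until example_unlock() writes 0 */
}

static inline void example_unlock(volatile unsigned int *lock)
{
	*lock = 0;
}
#endif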
extern asmlinkage void __backtrace(void);

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#define vectors_base()	(0)

#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

/*
 * We assume knowledge of how
 * spin_unlock_irq() and friends are implemented.  This avoids
 * us needlessly decrementing and incrementing the preempt count.
 */
#define prepare_arch_switch(rq,next)	local_irq_enable()
#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
#define task_running(rq,p)		((rq)->curr == (p))

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,prev->thread_info,next->thread_info);	\
} while (0)

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)				\
	do {						\
	  unsigned long temp;				\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ save_flags_cli\n"	\
"	orr	%1, %0, #0x08000000\n"			\
"	and	%0, %0, #0x0c000000\n"			\
"	teqp	%1, #0\n"				\
	  : "=r" (x), "=r" (temp)			\
	  :						\
	  : "memory");					\
	} while (0)

/*
 * Enable IRQs  (sti)
 */
#define local_irq_enable()				\
	do {						\
	  unsigned long temp;				\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ sti\n"		\
"	bic	%0, %0, #0x08000000\n"			\
"	teqp	%0, #0\n"				\
	  : "=r" (temp)					\
	  :						\
	  : "memory");					\
	} while(0)

/*
 * Disable IRQs (cli)
 */
#define local_irq_disable()				\
	do {						\
	  unsigned long temp;				\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ cli\n"		\
"	orr	%0, %0, #0x08000000\n"			\
"	teqp	%0, #0\n"				\
	  : "=r" (temp)					\
	  :						\
	  : "memory");					\
	} while(0)

/* Enable FIQs (stf) */

#define __stf()	do {					\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ stf\n"		\
"	bic	%0, %0, #0x04000000\n"			\
"	teqp	%0, #0\n"				\
	: "=r" (temp));					\
	} while(0)

/* Disable FIQs (clf) */

#define __clf()	do {					\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ clf\n"		\
"	orr	%0, %0, #0x04000000\n"			\
"	teqp	%0, #0\n"				\
	: "=r" (temp));					\
	} while(0)


/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)				\
	do {						\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ save_flags\n"		\
"	and	%0, %0, #0x0c000000\n"			\
	: "=r" (x));					\
	} while (0)


/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)				\
	do {						\
	  unsigned long temp;				\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ restore_flags\n"	\
"	bic	%0, %0, #0x0c000000\n"			\
"	orr	%0, %0, %1\n"				\
"	teqp	%0, #0\n"				\
	  : "=&r" (temp)				\
	  : "r" (x)					\
	  : "memory");					\
	} while (0)


#ifdef CONFIG_SMP
#error SMP not supported
#endif

#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while(0)

#define clf()			__clf()
#define stf()			__stf()

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	flags & PSR_I_BIT;		\
})

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);

	switch (size) {
	case 1:	return cpu_xchg_1(x, ptr);
	case 4:	return cpu_xchg_4(x, ptr);
	default: __bad_xchg(ptr, size);
	}
	return 0;
}

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
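
/*
 * Illustrative sketch only -- appended here as a usage note, not part of
 * the original header.  It shows how callers typically pair the
 * local_irq_save()/local_irq_restore() macros defined above: the saved
 * flags word holds the I/F bits taken from the 26-bit PC/PSR, so the pair
 * nests safely even when IRQs were already disabled on entry.
 * example_counter and example_add are made-up names.  Wrapped in "#if 0"
 * so it never compiles.
 */
#if 0
static unsigned long example_counter;

static inline void example_add(unsigned long n)
{
	unsigned long flags;

	local_irq_save(flags);		/* IRQs off, old state kept in flags */
	example_counter += n;		/* must not race with an IRQ handler */
	local_irq_restore(flags);	/* back to the state we started in */
}
#endif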