#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__


/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch. (From ARM32 - may come in handy)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

#if 0
/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

FIXME - sort this
/*
 * We need to turn the caches off before calling the reset vector - RiscOS
 * messes up if we don't
 */
#define proc_hard_reset()	cpu_proc_fin()

#endif

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

extern asmlinkage void __backtrace(void);

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc	p15, 0, %0, c1, c0, 0	@ get CR"	\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#define vectors_base()	(0)

#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)

/*
 * We assume knowledge of how
 * spin_unlock_irq() and friends are implemented.  This avoids
 * us needlessly decrementing and incrementing the preempt count.
 */
#define prepare_arch_switch(next)	local_irq_enable()
#define finish_arch_switch(prev)	spin_unlock(&(rq)->lock)

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev),task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)				\
	do {						\
	  unsigned long temp;				\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ save_flags_cli\n"	\
"	orr	%1, %0, #0x08000000\n"			\
"	and	%0, %0, #0x0c000000\n"			\
"	teqp	%1, #0\n"				\
	  : "=r" (x), "=r" (temp)			\
	  :						\
	  : "memory");					\
	} while (0)

/*
 * Enable IRQs (sti)
 */
#define local_irq_enable()				\
	do {						\
	  unsigned long temp;				\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ sti\n"		\
"	bic	%0, %0, #0x08000000\n"			\
"	teqp	%0, #0\n"				\
	  : "=r" (temp)					\
	  :						\
	  : "memory");					\
	} while(0)

/*
 * Disable IRQs (cli)
 */
#define local_irq_disable()				\
	do {						\
	  unsigned long temp;				\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ cli\n"		\
"	orr	%0, %0, #0x08000000\n"			\
"	teqp	%0, #0\n"				\
	  : "=r" (temp)					\
	  :						\
	  : "memory");					\
	} while(0)

/* Enable FIQs (stf) */

#define __stf()	do {					\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ stf\n"		\
"	bic	%0, %0, #0x04000000\n"			\
"	teqp	%0, #0\n"				\
	: "=r" (temp));					\
	} while(0)

/* Disable FIQs (clf) */

#define __clf()	do {					\
	unsigned long temp;				\
	__asm__ __volatile__(				\
"	mov	%0, pc		@ clf\n"		\
"	orr	%0, %0, #0x04000000\n"			\
"	teqp	%0, #0\n"				\
	: "=r" (temp));					\
	} while(0)


/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)				\
	do {						\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ save_flags\n"		\
"	and	%0, %0, #0x0c000000\n"			\
	: "=r" (x));					\
	} while (0)


/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)				\
	do {						\
	  unsigned long temp;				\
	  __asm__ __volatile__(				\
"	mov	%0, pc		@ restore_flags\n"	\
"	bic	%0, %0, #0x0c000000\n"			\
"	orr	%0, %0, %1\n"				\
"	teqp	%0, #0\n"				\
	: "=&r" (temp)					\
	: "r" (x)					\
	: "memory");					\
	} while (0)


#ifdef CONFIG_SMP
#error SMP not supported
#endif

#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do { } while(0)

#define clf()	__clf()
#define stf()	__stf()

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	flags & PSR_I_BIT;		\
})

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);

	switch (size) {
	case 1:	return cpu_xchg_1(x, ptr);
	case 4:	return cpu_xchg_4(x, ptr);
	default: __bad_xchg(ptr, size);
	}
	return 0;
}

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
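For context, here is a minimal usage sketch, not part of the header above, showing how code built on these definitions would typically guard a critical section with local_irq_save()/local_irq_restore() and perform an atomic swap with xchg(). The names my_lock_flag, my_counter, my_update() and my_try_lock() are hypothetical, invented only for illustration; the include path assumes the 2.6-era convention of pulling this file in as <asm/system.h>.

/* Hypothetical illustration only - not part of the kernel source. */
#include <asm/system.h>		/* local_irq_save(), local_irq_restore(), xchg() */

static volatile unsigned long my_lock_flag;	/* hypothetical lock word */
static unsigned long my_counter;		/* hypothetical shared counter */

static void my_update(void)
{
	unsigned long flags;

	/* Save the I/F flags (kept in the combined PC/PSR on 26-bit ARM) and disable IRQs. */
	local_irq_save(flags);
	my_counter++;			/* critical section runs with IRQs off */
	local_irq_restore(flags);	/* put the previously saved I/F state back */
}

static int my_try_lock(void)
{
	/* xchg() expands to __xchg(); only 1- and 4-byte operands are supported,
	 * anything else resolves to the undefined __bad_xchg() and fails to link. */
	return xchg(&my_lock_flag, 1) == 0;
}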