Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.22-rc2
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences. Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch. (From ARM32 - may come in handy)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>

struct thread_info;
struct task_struct;

#if 0
/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

FIXME - sort this
/*
 * We need to turn the caches off before calling the reset vector - RiscOS
 * messes up if we don't
 */
#define proc_hard_reset()	cpu_proc_fin()

#endif

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

void die_if_kernel(const char *str, struct pt_regs *regs, int err);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#include <asm/proc-fns.h>

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0, 0	@ set CR"	\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
		unsigned int __val;			\
		__asm__ __volatile__(			\
		"mrc	p15, 0, %0, c1, c0, 0	@ get CR" \
		: "=r" (__val) : : "cc");		\
		__val;					\
	})

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#define vectors_base()	(0)

#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define nop()	__asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#define read_barrier_depends()	do { } while (0)
#define set_mb(var, value)	do { var = value; mb(); } while (0)

/*
 * We assume knowledge of how spin_unlock_irq() and friends are
 * implemented. This avoids us needlessly decrementing and incrementing
 * the preempt count.
 */
#define prepare_arch_switch(next)	local_irq_enable()
#define finish_arch_switch(prev)	spin_unlock(&(rq)->lock)

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'. schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *,
				       struct thread_info *,
				       struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
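/*
 * Editorial illustration, not part of the original header: a minimal
 * sketch of how the xchg() macro above might be used. The names
 * example_lock, example_try_lock and example_unlock are hypothetical.
 * xchg() atomically stores the new value and hands back the previous
 * one, so a return of zero means this caller is the one who flipped
 * the flag from "free" to "taken".
 */
#if 0
static unsigned char example_lock;	/* hypothetical flag: 0 = free, 1 = taken */

static inline int example_try_lock(void)
{
	/* The old value tells us whether someone else already holds it. */
	return xchg(&example_lock, 1) == 0;
}

static inline void example_unlock(void)
{
	xchg(&example_lock, 0);		/* release: put the flag back to "free" */
}
#endif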
/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)				\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ save_flags_cli\n" \
	"	orr	%1, %0, #0x08000000\n"		\
	"	and	%0, %0, #0x0c000000\n"		\
	"	teqp	%1, #0\n"			\
		: "=r" (x), "=r" (temp)		\
		:					\
		: "memory");				\
	} while (0)

/*
 * Enable IRQs (sti)
 */
#define local_irq_enable()				\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ sti\n"	\
	"	bic	%0, %0, #0x08000000\n"		\
	"	teqp	%0, #0\n"			\
		: "=r" (temp)				\
		:					\
		: "memory");				\
	} while (0)

/*
 * Disable IRQs (cli)
 */
#define local_irq_disable()				\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ cli\n"	\
	"	orr	%0, %0, #0x08000000\n"		\
	"	teqp	%0, #0\n"			\
		: "=r" (temp)				\
		:					\
		: "memory");				\
	} while (0)

/* Enable FIQs (stf) */
#define __stf()						\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ stf\n"	\
	"	bic	%0, %0, #0x04000000\n"		\
	"	teqp	%0, #0\n"			\
		: "=r" (temp));				\
	} while (0)

/* Disable FIQs (clf) */
#define __clf()						\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ clf\n"	\
	"	orr	%0, %0, #0x04000000\n"		\
	"	teqp	%0, #0\n"			\
		: "=r" (temp));				\
	} while (0)

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)				\
	do {						\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ save_flags\n"	\
	"	and	%0, %0, #0x0c000000\n"		\
		: "=r" (x));				\
	} while (0)

/*
 * Restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)				\
	do {						\
		unsigned long temp;			\
		__asm__ __volatile__(			\
	"	mov	%0, pc		@ restore_flags\n" \
	"	bic	%0, %0, #0x0c000000\n"		\
	"	orr	%0, %0, %1\n"			\
	"	teqp	%0, #0\n"			\
		: "=&r" (temp)				\
		: "r" (x)				\
		: "memory");				\
	} while (0)

#ifdef CONFIG_SMP
#error SMP not supported
#endif

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)

#define clf()	__clf()
#define stf()	__stf()

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	flags & PSR_I_BIT;		\
})

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);

	switch (size) {
	case 1:
		return cpu_xchg_1(x, ptr);
	case 4:
		return cpu_xchg_4(x, ptr);
	default:
		__bad_xchg(ptr, size);
	}
	return 0;
}

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
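/*
 * Editorial illustration, not part of the original header: the usual
 * pairing of local_irq_save()/local_irq_restore() around a short
 * critical section. The names example_counter and example_increment
 * are hypothetical. On this 26-bit ARM port the IRQ and FIQ disable
 * flags live in bits 27 and 26 of the combined PC/PSR word, which is
 * why the macros above read "pc" and write the flags back with "teqp"
 * (teq with the P suffix, which updates the PSR bits) rather than the
 * mrs/msr instructions of later ARM architectures.
 */
#if 0
static unsigned long example_counter;

static void example_increment(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask IRQs, remembering the prior state */
	example_counter++;		/* safe from interrupt handlers here */
	local_irq_restore(flags);	/* restore whatever the mask was before */
}
#endif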