#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <linux/config.h>

/*
 * switch_to() switches tasks from prev to next; when prev is eventually
 * scheduled back in, last names the task that was switched away from.
 */

#define switch_to(prev, next, last) do {				\
	task_t *__last;							\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		!  with return to new PC\n\t" \
			      ".balign	4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
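
/*
 * Illustrative sketch (not part of this header): how the core scheduler is
 * expected to invoke switch_to() above.  'prev' and 'next' stand for the
 * outgoing and incoming tasks; the surrounding code and names are
 * assumptions for illustration only.
 *
 *	task_t *last;
 *
 *	// Saves prev's callee-saved registers, stack pointer and resume PC,
 *	// then switches to next's kernel stack and jumps to __switch_to()
 *	// with the return path set to next's saved PC.  When prev later
 *	// runs again, execution resumes here and 'last' names the task
 *	// that was switched away from.
 *	switch_to(prev, next, last);
 */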

#define nop()	__asm__ __volatile__ ("nop")


#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

static __inline__ unsigned long tas(volatile int *m)
{ /* #define tas(ptr) (xchg((ptr),1)) */
	unsigned long retval;

	__asm__ __volatile__ ("tas.b	@%1\n\t"
			      "movt	%0"
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#define mb()	__asm__ __volatile__ ("": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

/* Interrupt Control */
static __inline__ void local_irq_enable(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "stc	r6_bank, %1\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy0), "=r" (__dummy1)
			     : "1" (~0x000000f0)
			     : "memory");
}

static __inline__ void local_irq_disable(void)
{
	unsigned long __dummy;
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr"
			     : "=&z" (__dummy)
			     : /* no inputs */
			     : "memory");
}

#define local_save_flags(x) \
	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags != 0);			\
})

static __inline__ unsigned long local_irq_save(void)
{
	unsigned long flags, __dummy;

	__asm__ __volatile__("stc	sr, %1\n\t"
			     "mov	%1, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr\n\t"
			     "mov	%1, %0\n\t"
			     "and	#0xf0, %0"
			     : "=&z" (flags), "=&r" (__dummy)
			     :/**/
			     : "memory" );
	return flags;
}

#ifdef DEBUG_CLI_STI
static __inline__ void local_irq_restore(unsigned long x)
{
	if ((x & 0x000000f0) != 0x000000f0)
		local_irq_enable();
	else {
		unsigned long flags;
		local_save_flags(flags);

		if (flags == 0) {
			extern void dump_stack(void);
			printk(KERN_ERR "BUG!\n");
			dump_stack();
			local_irq_disable();
		}
	}
}
#else
#define local_irq_restore(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
} while (0)
#endif

#define really_restore_flags(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
	else						\
		local_irq_disable();			\
} while (0)

/*
 * Jump to the P2 (uncached) area.
 * When handling the TLB or caches, we need to do it from P2.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)

/*
 * Back to the P1 (cached) area.
 */
#define back_to_P1()					\
do {							\
	unsigned long __dummy;				\
	__asm__ __volatile__(				\
		"nop;nop;nop;nop;nop;nop;nop\n\t"	\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)

/* For spinlocks etc */
#define local_irq_save(x)	x = local_irq_save()
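
/*
 * Illustrative sketch (not part of this header): the usual critical-section
 * pattern built on the interrupt-control primitives above.
 * 'shared_counter' is a hypothetical variable used only for illustration.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// raise IMASK in SR; flags keeps the old mask bits
 *	shared_counter++;		// must not race with local interrupt handlers
 *	local_irq_restore(flags);	// re-enables only if interrupts were enabled before
 */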

static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
		break;
	case 1:
		return xchg_u8(ptr, x);
		break;
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif
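
/*
 * Illustrative sketch (not part of this header): typical use of xchg() and
 * tas() above.  'shared_word' and 'lock_byte' are hypothetical variables
 * used only for illustration.
 *
 *	static volatile int shared_word;
 *	static volatile int lock_byte;
 *
 *	// xchg() stores a new value and returns the old one; it dispatches
 *	// to xchg_u32() here because shared_word is 4 bytes wide.
 *	int old = xchg(&shared_word, 1);
 *
 *	// tas.b sets the T bit only when the byte was previously zero, so
 *	// tas() returns 1 exactly when this caller set the byte itself.
 *	while (!tas(&lock_byte))
 *		;			// spin until the byte was clear
 *	lock_byte = 0;			// release
 */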