/* include/asm-sh/system.h, as of Linux v2.6.13-rc2 */
#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <linux/config.h>

/*
 * switch_to(prev, next, last) suspends @prev, resumes @next, and
 * stores the task we actually switched away from in @last.
 */

#define switch_to(prev, next, last) do {				\
	task_t *__last;							\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		!  with return to new PC\n\t" \
			      ".balign	4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)

#define nop() __asm__ __volatile__ ("nop")


#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/*
 * tas.b atomically sets the MSB of the byte at @m; the T bit ends up
 * set iff the old value was zero, so this returns nonzero exactly
 * when the caller won the test-and-set.
 */
static __inline__ unsigned long tas(volatile int *m)
{ /* #define tas(ptr) (xchg((ptr),1)) */
	unsigned long retval;

	__asm__ __volatile__ ("tas.b	@%1\n\t"
			      "movt	%0"
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}

extern void __xchg_called_with_bad_pointer(void);

/* These all expand to compiler barriers only. */
#define mb()	__asm__ __volatile__ ("": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#define read_barrier_depends()	do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

/* Interrupt Control */

/* Clear the IMASK bits (SR[7:4]), ORing in the value held in r6_bank. */
static __inline__ void local_irq_enable(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "stc	r6_bank, %1\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy0), "=r" (__dummy1)
			     : "1" (~0x000000f0)
			     : "memory");
}

/* Raise IMASK to 15, blocking all interrupt levels. */
static __inline__ void local_irq_disable(void)
{
	unsigned long __dummy;
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr"
			     : "=&z" (__dummy)
			     : /* no inputs */
			     : "memory");
}

/* Extract the current IMASK bits into @x. */
#define local_save_flags(x) \
	__asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )
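#if 0
/*
 * Illustrative sketch, not part of the original header: the IMASK
 * field occupies SR bits 7:4, which is why 0xf0 recurs throughout
 * this file.  A raw critical section built from the primitives above
 * would look like this; real callers should prefer local_irq_save()
 * and local_irq_restore() below, which preserve the caller's existing
 * mask.  The function and parameter names here are hypothetical.
 */
static __inline__ void example_critical_section(volatile int *counter)
{
	local_irq_disable();	/* raise IMASK to 15: block all interrupts */
	(*counter)++;		/* hypothetical shared datum */
	local_irq_enable();	/* unconditionally reopen interrupts */
}
#endif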
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags != 0);			\
})

/* Raise IMASK to 15 and return the previous IMASK bits. */
static __inline__ unsigned long local_irq_save(void)
{
	unsigned long flags, __dummy;

	__asm__ __volatile__("stc	sr, %1\n\t"
			     "mov	%1, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr\n\t"
			     "mov	%1, %0\n\t"
			     "and	#0xf0, %0"
			     : "=&z" (flags), "=&r" (__dummy)
			     :/**/
			     : "memory" );
	return flags;
}

#ifdef DEBUG_CLI_STI
static __inline__ void local_irq_restore(unsigned long x)
{
	if ((x & 0x000000f0) != 0x000000f0)
		local_irq_enable();
	else {
		unsigned long flags;
		local_save_flags(flags);

		if (flags == 0) {
			extern void dump_stack(void);
			printk(KERN_ERR "BUG!\n");
			dump_stack();
			local_irq_disable();
		}
	}
}
#else
#define local_irq_restore(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
} while (0)
#endif

#define really_restore_flags(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
	else						\
		local_irq_disable();			\
} while (0)

/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from P2 area.
 * ORing 0x20000000 into a P1 (cached) address yields its P2
 * (uncached) alias.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)

/*
 * Back to P1 area.  The leading nops let operations issued while in
 * P2 complete before jumping back to the cached alias.
 */
#define back_to_P1()					\
do {							\
	unsigned long __dummy;				\
	__asm__ __volatile__(				\
		"nop;nop;nop;nop;nop;nop;nop\n\t"	\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)

/* For spinlocks etc */
#define local_irq_save(x)	x = local_irq_save()

static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
		break;
	case 1:
		return xchg_u8(ptr, x);
		break;
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif
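#if 0
/*
 * Usage sketch, not part of the original header: a minimal busy-wait
 * lock built on tas() above.  tas.b atomically sets the MSB of the
 * addressed byte and sets T iff the old value was zero, so tas()
 * returns nonzero exactly when the lock was free and is now held.
 * The example_* names are hypothetical.
 */
typedef volatile int example_lock_t;

static __inline__ void example_lock(example_lock_t *lock)
{
	while (!tas(lock))
		/* spin until the old value was 0, i.e. we acquired it */ ;
}

static __inline__ void example_unlock(example_lock_t *lock)
{
	mb();		/* compiler barrier before publishing the release */
	*lock = 0;	/* store 0 so the next tas.b can succeed */
}
#endif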