#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * include/asm-sh/system.h (Linux v2.6.18-rc2)
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

/*
 * switch_to() switches tasks from prev to next, returning the
 * previously-running task in last.
 */

#define switch_to(prev, next, last) do { \
        struct task_struct *__last; \
        register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
        register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
        register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
        register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
        register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
        register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \
        __asm__ __volatile__ (".balign 4\n\t" \
                              "stc.l gbr, @-r15\n\t" \
                              "sts.l pr, @-r15\n\t" \
                              "mov.l r8, @-r15\n\t" \
                              "mov.l r9, @-r15\n\t" \
                              "mov.l r10, @-r15\n\t" \
                              "mov.l r11, @-r15\n\t" \
                              "mov.l r12, @-r15\n\t" \
                              "mov.l r13, @-r15\n\t" \
                              "mov.l r14, @-r15\n\t" \
                              "mov.l r15, @r1 ! save SP\n\t" \
                              "mov.l @r6, r15 ! change to new stack\n\t" \
                              "mova 1f, %0\n\t" \
                              "mov.l %0, @r2 ! save PC\n\t" \
                              "mov.l 2f, %0\n\t" \
                              "jmp @%0 ! call __switch_to\n\t" \
                              " lds r7, pr !  with return to new PC\n\t" \
                              ".balign 4\n" \
                              "2:\n\t" \
                              ".long __switch_to\n" \
                              "1:\n\t" \
                              "mov.l @r15+, r14\n\t" \
                              "mov.l @r15+, r13\n\t" \
                              "mov.l @r15+, r12\n\t" \
                              "mov.l @r15+, r11\n\t" \
                              "mov.l @r15+, r10\n\t" \
                              "mov.l @r15+, r9\n\t" \
                              "mov.l @r15+, r8\n\t" \
                              "lds.l @r15+, pr\n\t" \
                              "ldc.l @r15+, gbr\n\t" \
                              : "=z" (__last) \
                              : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
                                "r" (__ts5), "r" (__ts6), "r" (__ts7) \
                              : "r3", "t"); \
        last = __last; \
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define nop() __asm__ __volatile__ ("nop")


#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

static __inline__ unsigned long tas(volatile int *m)
{ /* #define tas(ptr) (xchg((ptr),1)) */
        unsigned long retval;

        __asm__ __volatile__ ("tas.b @%1\n\t"
                              "movt %0"
                              : "=r" (retval): "r" (m): "t", "memory");
        return retval;
}

extern void __xchg_called_with_bad_pointer(void);
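
/*
 * A minimal usage sketch, assuming a hypothetical demo_lock flag: xchg()
 * can serve as a crude test-and-set flag.  Real code should use the
 * spinlock API rather than open-coding this; the snippet only illustrates
 * the primitive declared above.
 *
 *      static volatile int demo_lock;
 *
 *      while (xchg(&demo_lock, 1) != 0)
 *              ;                       spin until the old value was 0
 *      ... critical section ...
 *      xchg(&demo_lock, 0);            release by storing 0 back
 */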
#define mb() __asm__ __volatile__ ("": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
#define read_barrier_depends() do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)

/* Interrupt Control */
static __inline__ void local_irq_enable(void)
{
        unsigned long __dummy0, __dummy1;

        __asm__ __volatile__("stc sr, %0\n\t"
                             "and %1, %0\n\t"
                             "stc r6_bank, %1\n\t"
                             "or %1, %0\n\t"
                             "ldc %0, sr"
                             : "=&r" (__dummy0), "=r" (__dummy1)
                             : "1" (~0x000000f0)
                             : "memory");
}

static __inline__ void local_irq_disable(void)
{
        unsigned long __dummy;
        __asm__ __volatile__("stc sr, %0\n\t"
                             "or #0xf0, %0\n\t"
                             "ldc %0, sr"
                             : "=&z" (__dummy)
                             : /* no inputs */
                             : "memory");
}

#define local_save_flags(x) \
        __asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )

#define irqs_disabled() \
({ \
        unsigned long flags; \
        local_save_flags(flags); \
        (flags != 0); \
})

static __inline__ unsigned long local_irq_save(void)
{
        unsigned long flags, __dummy;

        __asm__ __volatile__("stc sr, %1\n\t"
                             "mov %1, %0\n\t"
                             "or #0xf0, %0\n\t"
                             "ldc %0, sr\n\t"
                             "mov %1, %0\n\t"
                             "and #0xf0, %0"
                             : "=&z" (flags), "=&r" (__dummy)
                             :/**/
                             : "memory" );
        return flags;
}

#ifdef DEBUG_CLI_STI
static __inline__ void local_irq_restore(unsigned long x)
{
        if ((x & 0x000000f0) != 0x000000f0)
                local_irq_enable();
        else {
                unsigned long flags;
                local_save_flags(flags);

                if (flags == 0) {
                        extern void dump_stack(void);
                        printk(KERN_ERR "BUG!\n");
                        dump_stack();
                        local_irq_disable();
                }
        }
}
#else
#define local_irq_restore(x) do { \
        if ((x & 0x000000f0) != 0x000000f0) \
                local_irq_enable(); \
} while (0)
#endif

#define really_restore_flags(x) do { \
        if ((x & 0x000000f0) != 0x000000f0) \
                local_irq_enable(); \
        else \
                local_irq_disable(); \
} while (0)

/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from P2 area.
 */
#define jump_to_P2() \
do { \
        unsigned long __dummy; \
        __asm__ __volatile__( \
                "mov.l 1f, %0\n\t" \
                "or %1, %0\n\t" \
                "jmp @%0\n\t" \
                " nop\n\t" \
                ".balign 4\n" \
                "1: .long 2f\n" \
                "2:" \
                : "=&r" (__dummy) \
                : "r" (0x20000000)); \
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1() \
do { \
        unsigned long __dummy; \
        __asm__ __volatile__( \
                "nop;nop;nop;nop;nop;nop;nop\n\t" \
                "mov.l 1f, %0\n\t" \
                "jmp @%0\n\t" \
                " nop\n\t" \
                ".balign 4\n" \
                "1: .long 2f\n" \
                "2:" \
                : "=&r" (__dummy)); \
} while (0)
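
/*
 * A minimal usage sketch of the pair above, with the actual cache/TLB
 * operation elided: code that must run uncached jumps to the P2 view,
 * does its work, and then returns to P1.
 *
 *      jump_to_P2();
 *      ... touch the cache or TLB address arrays from the uncached P2 view ...
 *      back_to_P1();
 */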
/* For spinlocks etc */
#define local_irq_save(x) x = local_irq_save()

static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);
        return retval;
}

static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
{
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val & 0xff;
        local_irq_restore(flags);
        return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 4:
                return xchg_u32(ptr, x);
                break;
        case 1:
                return xchg_u8(ptr, x);
                break;
        }
        __xchg_called_with_bad_pointer();
        return x;
}

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif