#ifndef _H8300_SYSTEM_H
#define _H8300_SYSTEM_H

#include <linux/config.h> /* get configuration macros */
#include <linux/linkage.h>

#define prepare_to_switch()	do { } while(0)

/*
 * switch_to() should switch tasks to the given task pointer, first
 * checking that it isn't the current task, in which case it does
 * nothing.  This also clears the TS-flag if the task we switched to
 * has used the math co-processor last.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1.  Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behaviour when a task returns with
 * unexpected register values.
 *
 * The syscall entry code saves these registers itself, and none of
 * them are used by the syscall path after the function implementing
 * the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1.  This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 *
 * H8/300 Porting 2002/09/04 Yoshinori Sato
 */

asmlinkage void resume(void);
#define switch_to(prev,next,last) {				\
	void *_last;						\
	__asm__ __volatile__(					\
		"mov.l	%1, er0\n\t"				\
		"mov.l	%2, er1\n\t"				\
		"mov.l	%3, er2\n\t"				\
		"jsr	@_resume\n\t"				\
		"mov.l	er2,%0\n\t"				\
		: "=r" (_last)					\
		: "r" (&(prev->thread)),			\
		  "r" (&(next->thread)),			\
		  "g" (prev)					\
		: "cc", "er0", "er1", "er2", "er3");		\
	(last) = _last;						\
}

#define __sti() asm volatile ("andc #0x7f,ccr")
#define __cli() asm volatile ("orc #0x80,ccr")

#define __save_flags(x) \
	asm volatile ("stc ccr,%w0" : "=r" (x))

#define __restore_flags(x) \
	asm volatile ("ldc %w0,ccr" : : "r" (x))

#define irqs_disabled()			\
({					\
	unsigned char flags;		\
	__save_flags(flags);		\
	((flags & 0x80) == 0x80);	\
})

#define iret() __asm__ __volatile__ ("rte" : : : "memory", "sp", "cc")

/* For spinlocks etc */
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
#define local_irq_save(x)	({ __save_flags(x); local_irq_disable(); })
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)
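/*
 * Usage sketch: on this uniprocessor port the local_irq_save() /
 * local_irq_restore() pair above is the usual way to protect a short
 * critical section against interrupt handlers.  A minimal example,
 * assuming a hypothetical driver counter (the names are illustrative
 * only):
 */
#if 0
static unsigned long example_count;		/* hypothetical data */

static void example_bump(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* save CCR, then mask interrupts */
	example_count++;		/* critical section */
	local_irq_restore(flags);	/* put the saved CCR back */
}
#endif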
/*
 * Force strict CPU ordering.
 * Not really required on H8...
 */
#define nop()	asm volatile ("nop"::)
#define mb()	asm volatile ("" : : : "memory")
#define rmb()	asm volatile ("" : : : "memory")
#define wmb()	asm volatile ("" : : : "memory")
#define set_rmb(var, value)	do { xchg(&var, value); } while (0)
#define set_mb(var, value)	set_rmb(var, value)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("mov.b %2,%0\n\t"
			 "mov.b %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("mov.w %2,%0\n\t"
			 "mov.w %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("mov.l %2,%0\n\t"
			 "mov.l %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	default:
		tmp = 0;
	}
	local_irq_restore(flags);
	return tmp;
}

#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("jmp @@0");			\
})

#define arch_align_stack(x) (x)

#endif /* _H8300_SYSTEM_H */
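/*
 * Usage sketch: tas() above expands to xchg(ptr, 1), an exchange that
 * is atomic with respect to local interrupts because __xchg() runs
 * with interrupts disabled.  A minimal busy-wait lock built on it
 * could look like the following (the names are illustrative only;
 * real code should use the generic spinlock API instead):
 */
#if 0
static volatile int example_lock;		/* 0 = free, 1 = held */

static void example_acquire(void)
{
	while (tas(&example_lock))		/* returns the old value */
		;				/* spin while it was already held */
}

static void example_release(void)
{
	wmb();					/* compiler barrier before the release */
	example_lock = 0;			/* mark the lock free again */
}
#endif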