/* Source: Linux v2.6.16-rc2, include/asm-xtensa/system.h (236 lines, 5.9 kB) */
1/* 2 * include/asm-xtensa/system.h 3 * 4 * This file is subject to the terms and conditions of the GNU General Public 5 * License. See the file "COPYING" in the main directory of this archive 6 * for more details. 7 * 8 * Copyright (C) 2001 - 2005 Tensilica Inc. 9 */ 10 11#ifndef _XTENSA_SYSTEM_H 12#define _XTENSA_SYSTEM_H 13 14#include <linux/config.h> 15#include <linux/stringify.h> 16 17#include <asm/processor.h> 18 19/* interrupt control */ 20 21#define local_save_flags(x) \ 22 __asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x)); 23#define local_irq_restore(x) do { \ 24 __asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \ 25 :: "a" (x) : "memory"); } while(0); 26#define local_irq_save(x) do { \ 27 __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \ 28 : "=a" (x) :: "memory");} while(0); 29 30static inline void local_irq_disable(void) 31{ 32 unsigned long flags; 33 __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) 34 : "=a" (flags) :: "memory"); 35} 36static inline void local_irq_enable(void) 37{ 38 unsigned long flags; 39 __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory"); 40 41} 42 43static inline int irqs_disabled(void) 44{ 45 unsigned long flags; 46 local_save_flags(flags); 47 return flags & 0xf; 48} 49 50#define RSR_CPENABLE(x) do { \ 51 __asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \ 52 } while(0); 53#define WSR_CPENABLE(x) do { \ 54 __asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync" \ 55 :: "a" (x));} while(0); 56 57#define clear_cpenable() __clear_cpenable() 58 59static inline void __clear_cpenable(void) 60{ 61#if XCHAL_HAVE_CP 62 unsigned long i = 0; 63 WSR_CPENABLE(i); 64#endif 65} 66 67static inline void enable_coprocessor(int i) 68{ 69#if XCHAL_HAVE_CP 70 int cp; 71 RSR_CPENABLE(cp); 72 cp |= 1 << i; 73 WSR_CPENABLE(cp); 74#endif 75} 76 77static inline void disable_coprocessor(int i) 78{ 79#if XCHAL_HAVE_CP 80 int cp; 81 RSR_CPENABLE(cp); 82 cp &= ~(1 << i); 83 WSR_CPENABLE(cp); 
84#endif 85} 86 87#define smp_read_barrier_depends() do { } while(0) 88#define read_barrier_depends() do { } while(0) 89 90#define mb() barrier() 91#define rmb() mb() 92#define wmb() mb() 93 94#ifdef CONFIG_SMP 95#error smp_* not defined 96#else 97#define smp_mb() barrier() 98#define smp_rmb() barrier() 99#define smp_wmb() barrier() 100#endif 101 102#define set_mb(var, value) do { var = value; mb(); } while (0) 103#define set_wmb(var, value) do { var = value; wmb(); } while (0) 104 105#if !defined (__ASSEMBLY__) 106 107/* * switch_to(n) should switch tasks to task nr n, first 108 * checking that n isn't the current task, in which case it does nothing. 109 */ 110extern void *_switch_to(void *last, void *next); 111 112#endif /* __ASSEMBLY__ */ 113 114#define prepare_to_switch() do { } while(0) 115 116#define switch_to(prev,next,last) \ 117do { \ 118 clear_cpenable(); \ 119 (last) = _switch_to(prev, next); \ 120} while(0) 121 122/* 123 * cmpxchg 124 */ 125 126static inline unsigned long 127__cmpxchg_u32(volatile int *p, int old, int new) 128{ 129 __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" 130 "l32i %0, %1, 0 \n\t" 131 "bne %0, %2, 1f \n\t" 132 "s32i %3, %1, 0 \n\t" 133 "1: \n\t" 134 "wsr a15, "__stringify(PS)" \n\t" 135 "rsync \n\t" 136 : "=&a" (old) 137 : "a" (p), "a" (old), "r" (new) 138 : "a15", "memory"); 139 return old; 140} 141/* This function doesn't exist, so you'll get a linker error 142 * if something tries to do an invalid cmpxchg(). 
*/ 143 144extern void __cmpxchg_called_with_bad_pointer(void); 145 146static __inline__ unsigned long 147__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) 148{ 149 switch (size) { 150 case 4: return __cmpxchg_u32(ptr, old, new); 151 default: __cmpxchg_called_with_bad_pointer(); 152 return old; 153 } 154} 155 156#define cmpxchg(ptr,o,n) \ 157 ({ __typeof__(*(ptr)) _o_ = (o); \ 158 __typeof__(*(ptr)) _n_ = (n); \ 159 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ 160 (unsigned long)_n_, sizeof (*(ptr))); \ 161 }) 162 163 164 165 166/* 167 * xchg_u32 168 * 169 * Note that a15 is used here because the register allocation 170 * done by the compiler is not guaranteed and a window overflow 171 * may not occur between the rsil and wsr instructions. By using 172 * a15 in the rsil, the machine is guaranteed to be in a state 173 * where no register reference will cause an overflow. 174 */ 175 176static inline unsigned long xchg_u32(volatile int * m, unsigned long val) 177{ 178 unsigned long tmp; 179 __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" 180 "l32i %0, %1, 0 \n\t" 181 "s32i %2, %1, 0 \n\t" 182 "wsr a15, "__stringify(PS)" \n\t" 183 "rsync \n\t" 184 : "=&a" (tmp) 185 : "a" (m), "a" (val) 186 : "a15", "memory"); 187 return tmp; 188} 189 190#define tas(ptr) (xchg((ptr),1)) 191 192#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 193 194/* 195 * This only works if the compiler isn't horribly bad at optimizing. 196 * gcc-2.5.8 reportedly can't handle this, but I define that one to 197 * be dead anyway. 
198 */ 199 200extern void __xchg_called_with_bad_pointer(void); 201 202static __inline__ unsigned long 203__xchg(unsigned long x, volatile void * ptr, int size) 204{ 205 switch (size) { 206 case 4: 207 return xchg_u32(ptr, x); 208 } 209 __xchg_called_with_bad_pointer(); 210 return x; 211} 212 213extern void set_except_vector(int n, void *addr); 214 215static inline void spill_registers(void) 216{ 217 unsigned int a0, ps; 218 219 __asm__ __volatile__ ( 220 "movi a14," __stringify (PS_EXCM_MASK) " | 1\n\t" 221 "mov a12, a0\n\t" 222 "rsr a13," __stringify(SAR) "\n\t" 223 "xsr a14," __stringify(PS) "\n\t" 224 "movi a0, _spill_registers\n\t" 225 "rsync\n\t" 226 "callx0 a0\n\t" 227 "mov a0, a12\n\t" 228 "wsr a13," __stringify(SAR) "\n\t" 229 "wsr a14," __stringify(PS) "\n\t" 230 :: "a" (&a0), "a" (&ps) 231 : "a2", "a3", "a12", "a13", "a14", "a15", "memory"); 232} 233 234#define arch_align_stack(x) (x) 235 236#endif /* _XTENSA_SYSTEM_H */