/* arch/mips/include/asm/system.h — as of Linux v2.6.35 (239 lines, 6.0 kB) */
1/* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle 7 * Copyright (C) 1996 by Paul M. Antoine 8 * Copyright (C) 1999 Silicon Graphics 9 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com 10 * Copyright (C) 2000 MIPS Technologies, Inc. 11 */ 12#ifndef _ASM_SYSTEM_H 13#define _ASM_SYSTEM_H 14 15#include <linux/kernel.h> 16#include <linux/types.h> 17#include <linux/irqflags.h> 18 19#include <asm/addrspace.h> 20#include <asm/barrier.h> 21#include <asm/cmpxchg.h> 22#include <asm/cpu-features.h> 23#include <asm/dsp.h> 24#include <asm/watch.h> 25#include <asm/war.h> 26 27 28/* 29 * switch_to(n) should switch tasks to task nr n, first 30 * checking that n isn't the current task, in which case it does nothing. 31 */ 32extern asmlinkage void *resume(void *last, void *next, void *next_ti); 33 34struct task_struct; 35 36extern unsigned int ll_bit; 37extern struct task_struct *ll_task; 38 39#ifdef CONFIG_MIPS_MT_FPAFF 40 41/* 42 * Handle the scheduler resume end of FPU affinity management. We do this 43 * inline to try to keep the overhead down. If we have been forced to run on 44 * a "CPU" with an FPU because of a previous high level of FP computation, 45 * but did not actually use the FPU during the most recent time-slice (CU1 46 * isn't set), we undo the restriction on cpus_allowed. 47 * 48 * We're not calling set_cpus_allowed() here, because we have no need to 49 * force prompt migration - we're already switching the current CPU to a 50 * different thread. 
51 */ 52 53#define __mips_mt_fpaff_switch_to(prev) \ 54do { \ 55 struct thread_info *__prev_ti = task_thread_info(prev); \ 56 \ 57 if (cpu_has_fpu && \ 58 test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \ 59 (!(KSTK_STATUS(prev) & ST0_CU1))) { \ 60 clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \ 61 prev->cpus_allowed = prev->thread.user_cpus_allowed; \ 62 } \ 63 next->thread.emulated_fp = 0; \ 64} while(0) 65 66#else 67#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0) 68#endif 69 70#define __clear_software_ll_bit() \ 71do { \ 72 if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ 73 ll_bit = 0; \ 74} while (0) 75 76#define switch_to(prev, next, last) \ 77do { \ 78 __mips_mt_fpaff_switch_to(prev); \ 79 if (cpu_has_dsp) \ 80 __save_dsp(prev); \ 81 __clear_software_ll_bit(); \ 82 (last) = resume(prev, next, task_thread_info(next)); \ 83} while (0) 84 85#define finish_arch_switch(prev) \ 86do { \ 87 if (cpu_has_dsp) \ 88 __restore_dsp(current); \ 89 if (cpu_has_userlocal) \ 90 write_c0_userlocal(current_thread_info()->tp_value); \ 91 __restore_watch(); \ 92} while (0) 93 94static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) 95{ 96 __u32 retval; 97 98 smp_mb__before_llsc(); 99 100 if (kernel_uses_llsc && R10000_LLSC_WAR) { 101 unsigned long dummy; 102 103 __asm__ __volatile__( 104 " .set mips3 \n" 105 "1: ll %0, %3 # xchg_u32 \n" 106 " .set mips0 \n" 107 " move %2, %z4 \n" 108 " .set mips3 \n" 109 " sc %2, %1 \n" 110 " beqzl %2, 1b \n" 111 " .set mips0 \n" 112 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 113 : "R" (*m), "Jr" (val) 114 : "memory"); 115 } else if (kernel_uses_llsc) { 116 unsigned long dummy; 117 118 __asm__ __volatile__( 119 " .set mips3 \n" 120 "1: ll %0, %3 # xchg_u32 \n" 121 " .set mips0 \n" 122 " move %2, %z4 \n" 123 " .set mips3 \n" 124 " sc %2, %1 \n" 125 " beqz %2, 2f \n" 126 " .subsection 2 \n" 127 "2: b 1b \n" 128 " .previous \n" 129 " .set mips0 \n" 130 : "=&r" (retval), "=m" (*m), "=&r" 
(dummy) 131 : "R" (*m), "Jr" (val) 132 : "memory"); 133 } else { 134 unsigned long flags; 135 136 raw_local_irq_save(flags); 137 retval = *m; 138 *m = val; 139 raw_local_irq_restore(flags); /* implies memory barrier */ 140 } 141 142 smp_llsc_mb(); 143 144 return retval; 145} 146 147#ifdef CONFIG_64BIT 148static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) 149{ 150 __u64 retval; 151 152 smp_mb__before_llsc(); 153 154 if (kernel_uses_llsc && R10000_LLSC_WAR) { 155 unsigned long dummy; 156 157 __asm__ __volatile__( 158 " .set mips3 \n" 159 "1: lld %0, %3 # xchg_u64 \n" 160 " move %2, %z4 \n" 161 " scd %2, %1 \n" 162 " beqzl %2, 1b \n" 163 " .set mips0 \n" 164 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 165 : "R" (*m), "Jr" (val) 166 : "memory"); 167 } else if (kernel_uses_llsc) { 168 unsigned long dummy; 169 170 __asm__ __volatile__( 171 " .set mips3 \n" 172 "1: lld %0, %3 # xchg_u64 \n" 173 " move %2, %z4 \n" 174 " scd %2, %1 \n" 175 " beqz %2, 2f \n" 176 " .subsection 2 \n" 177 "2: b 1b \n" 178 " .previous \n" 179 " .set mips0 \n" 180 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 181 : "R" (*m), "Jr" (val) 182 : "memory"); 183 } else { 184 unsigned long flags; 185 186 raw_local_irq_save(flags); 187 retval = *m; 188 *m = val; 189 raw_local_irq_restore(flags); /* implies memory barrier */ 190 } 191 192 smp_llsc_mb(); 193 194 return retval; 195} 196#else 197extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val); 198#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels 199#endif 200 201static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) 202{ 203 switch (size) { 204 case 4: 205 return __xchg_u32(ptr, x); 206 case 8: 207 return __xchg_u64(ptr, x); 208 } 209 210 return x; 211} 212 213#define xchg(ptr, x) \ 214({ \ 215 BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \ 216 \ 217 ((__typeof__(*(ptr))) \ 218 __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \ 219}) 220 221extern void set_handler(unsigned long 
offset, void *addr, unsigned long len); 222extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); 223 224typedef void (*vi_handler_t)(void); 225extern void *set_vi_handler(int n, vi_handler_t addr); 226 227extern void *set_except_vector(int n, void *addr); 228extern unsigned long ebase; 229extern void per_cpu_trap_init(void); 230 231/* 232 * See include/asm-ia64/system.h; prevents deadlock on SMP 233 * systems. 234 */ 235#define __ARCH_WANT_UNLOCKED_CTXSW 236 237extern unsigned long arch_align_stack(unsigned long sp); 238 239#endif /* _ASM_SYSTEM_H */