/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/watch.h>
#include <asm/war.h>


/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

/*
 * State for the software LL/SC-bit emulation used on CPUs without
 * hardware ll/sc (cleared on context switch, see
 * __clear_software_ll_bit() below).
 */
extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
*/

/*
 * See the block comment above: drop the TIF_FPUBOUND cpu affinity
 * restriction again if the previous task did not touch the FPU (CU1
 * clear in its saved status word) during its last time-slice.
 *
 * NOTE(review): the macro body also references `next', which is not a
 * parameter -- it only works because it is expanded inside switch_to()
 * below, where `next' is in scope.  Fragile; confirm before reusing
 * this macro anywhere else.
 */
#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);		\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while(0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

/*
 * On kernels where LL/SC is emulated in software, invalidate the
 * emulated LL bit across a context switch so an interrupted LL/SC
 * sequence in the outgoing task cannot succeed spuriously later.
 * For a constant-true cpu_has_llsc this compiles away entirely.
 */
#define __clear_software_ll_bit()					\
do {									\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)

/* Arch context switch: FPU affinity, DSP state, LL bit, then resume(). */
#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	__clear_software_ll_bit();					\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)

/* Run on the new task's side of the switch: restore per-task CPU state. */
#define finish_arch_switch(prev)					\
do {									\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
	__restore_watch();						\
} while (0)

/*
 * Atomically exchange the 32-bit word at *m with val and return the
 * old value.  Three variants: ll/sc with the branch-likely retry
 * (R10000_LLSC_WAR workaround), plain ll/sc with the retry branch
 * moved out of line (.subsection 2), and an interrupt-disable
 * fallback for CPUs without ll/sc.
 */
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		/* No ll/sc: pseudo-atomic via local interrupt disable. */
		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}

#ifdef CONFIG_64BIT
/* 64-bit counterpart of __xchg_u32, using lld/scd. */
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		/* No ll/sc: pseudo-atomic via local interrupt disable. */
		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}
#else
/* Deliberately undefined: a 64-bit xchg on a 32-bit kernel fails at link time. */
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().
*/ 198extern void __xchg_called_with_bad_pointer(void); 199 200static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) 201{ 202 switch (size) { 203 case 4: 204 return __xchg_u32(ptr, x); 205 case 8: 206 return __xchg_u64(ptr, x); 207 } 208 __xchg_called_with_bad_pointer(); 209 return x; 210} 211 212#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) 213 214extern void set_handler(unsigned long offset, void *addr, unsigned long len); 215extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); 216 217typedef void (*vi_handler_t)(void); 218extern void *set_vi_handler(int n, vi_handler_t addr); 219 220extern void *set_except_vector(int n, void *addr); 221extern unsigned long ebase; 222extern void per_cpu_trap_init(void); 223 224/* 225 * See include/asm-ia64/system.h; prevents deadlock on SMP 226 * systems. 227 */ 228#define __ARCH_WANT_UNLOCKED_CTXSW 229 230extern unsigned long arch_align_stack(unsigned long sp); 231 232#endif /* _ASM_SYSTEM_H */