/* include/asm-mips/interrupt.h — Linux kernel v2.6.17-rc5 */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_INTERRUPT_H
#define _ASM_INTERRUPT_H

#include <linux/config.h>
#include <asm/hazards.h>

/*
 * Local interrupt enable/disable primitives for MIPS.
 *
 * Each primitive is emitted exactly once as a GAS assembler macro (the
 * top-level __asm__ statements below) and then invoked by name from a tiny
 * inline-asm wrapper, so every call site expands to the same hand-scheduled
 * instruction sequence.
 *
 * Three compile-time variants:
 *  - CONFIG_MIPS_MT_SMTC: interrupts are masked per thread context via the
 *    IXMT bit (0x400) of TCStatus (CP0 register 2, select 1).
 *  - CONFIG_CPU_MIPSR2:   the dedicated ei/di instructions toggle Status.IE.
 *  - otherwise:           read-modify-write of the CP0 Status register ($12).
 *
 * $1 is the assembler temporary, hence ".set noat" wherever it is used.
 * irq_enable_hazard/irq_disable_hazard come from <asm/hazards.h> and pad
 * the CP0 write-to-effect hazard window for the current CPU type.
 */
__asm__ (
	" .macro local_irq_enable \n"
	" .set push \n"
	" .set reorder \n"
	" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
	" mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
	" ori $1, 0x400 \n"
	" xori $1, 0x400 \n"
	" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
	" ei \n"
#else
	/*
	 * ori 0x1f sets the low five Status bits, xori 0x1e then clears all
	 * of them except bit 0 (IE) - sets IE without needing a mask register.
	 */
	" mfc0 $1,$12 \n"
	" ori $1,0x1f \n"
	" xori $1,0x1e \n"
	" mtc0 $1,$12 \n"
#endif
	" irq_enable_hazard \n"
	" .set pop \n"
	" .endm");

/* Enable interrupts on the current CPU (or SMTC thread context). */
static inline void local_irq_enable(void)
{
	__asm__ __volatile__(
		"local_irq_enable"
		: /* no outputs */
		: /* no inputs */
		: "memory");	/* compiler barrier: no memory ops move across */
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * For TX49, operating only IE bit is not enough.
 *
 * If mfc0 $12 follows store and the mfc0 is last instruction of a
 * page and fetching the next instruction causes TLB miss, the result
 * of the mfc0 might wrongly contain EXL bit.
 *
 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask EXL bit of the result or place a nop before mfc0.
 */
__asm__ (
	" .macro local_irq_disable\n"
	" .set push \n"
	" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
	/* Set TCStatus.IXMT to mask interrupts for this thread context. */
	" mfc0 $1, $2, 1 \n"
	" ori $1, 0x400 \n"
	" .set noreorder \n"
	" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
	" di \n"
#else
	/*
	 * ori/xori with the same mask (0x1f) forces the low five Status
	 * bits to zero, clearing IE.  NOTE(review): this also zeroes
	 * EXL/ERL/KSU - presumably intentional here, see TX49 note above.
	 */
	" mfc0 $1,$12 \n"
	" ori $1,0x1f \n"
	" xori $1,0x1f \n"
	" .set noreorder \n"
	" mtc0 $1,$12 \n"
#endif
	" irq_disable_hazard \n"
	" .set pop \n"
	" .endm \n");

/* Disable interrupts on the current CPU (or SMTC thread context). */
static inline void local_irq_disable(void)
{
	__asm__ __volatile__(
		"local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

/*
 * Read the current interrupt state without changing it: the raw TCStatus
 * word under SMTC, otherwise the raw CP0 Status word.
 */
__asm__ (
	" .macro local_save_flags flags \n"
	" .set push \n"
	" .set reorder \n"
#ifdef CONFIG_MIPS_MT_SMTC
	" mfc0 \\flags, $2, 1 \n"
#else
	" mfc0 \\flags, $12 \n"
#endif
	" .set pop \n"
	" .endm \n");

#define local_save_flags(x) \
__asm__ __volatile__( \
	"local_save_flags %0" \
	: "=r" (x))

/*
 * Atomically save the current interrupt state into \result and disable
 * interrupts.  What ends up in \result differs per variant:
 *  - SMTC:   only the old IXMT bit (masked with 0x400);
 *  - MIPSR2: "di \result" deposits the pre-modification Status word,
 *            then andi keeps only the old IE bit;
 *  - classic: the full old Status word (IE is bit 0).
 * In every case local_irq_restore below accepts the saved value.
 */
__asm__ (
	" .macro local_irq_save result \n"
	" .set push \n"
	" .set reorder \n"
	" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
	" mfc0 \\result, $2, 1 \n"
	" ori $1, \\result, 0x400 \n"
	" .set noreorder \n"
	" mtc0 $1, $2, 1 \n"
	" andi \\result, \\result, 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
	" di \\result \n"
	" andi \\result, 1 \n"
#else
	" mfc0 \\result, $12 \n"
	" ori $1, \\result, 0x1f \n"
	" xori $1, 0x1f \n"
	" .set noreorder \n"
	" mtc0 $1, $12 \n"
#endif
	" irq_disable_hazard \n"
	" .set pop \n"
	" .endm \n");

#define local_irq_save(x) \
__asm__ __volatile__( \
	"local_irq_save\t%0" \
	: "=r" (x) \
	: /* no inputs */ \
	: "memory")

/*
 * Restore the interrupt state previously captured by local_irq_save.
 * The whole macro runs with ".set noreorder", so instruction placement
 * (notably branch delay slots in the IRQ_CPU variant) is deliberate.
 */
__asm__ (
	" .macro local_irq_restore flags \n"
	" .set push \n"
	" .set noreorder \n"
	" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Merge the saved IXMT bit from \flags into the current TCStatus:
	 * keep only IXMT from the saved value, clear IXMT in the live
	 * TCStatus (ori+xori), OR the two together and write back.
	 */
	"mfc0 $1, $2, 1 \n"
	"andi \\flags, 0x400 \n"
	"ori $1, 0x400 \n"
	"xori $1, 0x400 \n"
	"or \\flags, $1 \n"
	"mtc0 \\flags, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 *
	 * The "di" sits in the beqz delay slot, so it executes on both
	 * paths: if the saved IE bit is clear we branch past the "ei"
	 * and stay disabled; otherwise "ei" re-enables interrupts.
	 */
	" beqz \\flags, 1f \n"
	" di \n"
	" ei \n"
	"1: \n"
#elif defined(CONFIG_CPU_MIPSR2)
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 * "ins" deposits bit 0 of \flags directly into Status.IE
	 * (non-atomic read-modify-write of Status).
	 */
	" mfc0 $1, $12 \n"
	" ins $1, \\flags, 0, 1 \n"
	" mtc0 $1, $12 \n"
#else
	/*
	 * Classic CPUs: keep only the saved IE bit, zero the low five
	 * bits of the live Status (ori+xori 0x1f), OR in the saved IE
	 * and write the merged word back.
	 */
	" mfc0 $1, $12 \n"
	" andi \\flags, 1 \n"
	" ori $1, 0x1f \n"
	" xori $1, 0x1f \n"
	" or \\flags, $1 \n"
	" mtc0 \\flags, $12 \n"
#endif
	" irq_disable_hazard \n"
	" .set pop \n"
	" .endm \n");

#define local_irq_restore(flags) \
do { \
	unsigned long __tmp1; \
 \
	__asm__ __volatile__( \
		"local_irq_restore\t%0" \
		: "=r" (__tmp1) \
		: "0" (flags) \
		: "memory"); \
} while(0)

/* Return non-zero iff interrupts are currently disabled on this CPU. */
static inline int irqs_disabled(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
	 */
	unsigned long __result;

	/* Isolate IXMT; "slt %0, $0, %0" normalizes any non-zero value to 1. */
	__asm__ __volatile__(
		" .set noreorder \n"
		" mfc0 %0, $2, 1 \n"
		" andi %0, 0x400 \n"
		" slt %0, $0, %0 \n"
		" .set reorder \n"
		: "=r" (__result));

	return __result;
#else
	unsigned long flags;
	local_save_flags(flags);

	/* Bit 0 of Status is IE: clear means interrupts are disabled. */
	return !(flags & 1);
#endif
}

#endif /* _ASM_INTERRUPT_H */