FRV: Add support for emulation of userspace atomic ops [try #2]

Use traps 120-126 to emulate atomic cmpxchg32, xchg32, and XOR-, OR-, AND-, SUB-
and ADD-to-memory operations for userspace.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
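For context, userspace enters these handlers with a TIRA instruction: operands go in gr8-gr10 and results come back in gr5/gr9, per the calling convention documented in atomic_operation() below. A minimal sketch of what the trap-120 stub could look like as a C wrapper using GCC register-asm variables; only the trap number and register convention come from this patch, the wrapper itself is illustrative:

	/* Hypothetical userspace stub for the trap-120 cmpxchg handler.
	 * Convention (from atomic_operation() below): gr8 = ptr, gr9 = test
	 * value, gr10 = new value; the kernel returns the original memory
	 * value in gr5 and the value it wrote in gr9. */
	static inline unsigned long
	__atomic_user_cmpxchg32(unsigned long *ptr, unsigned long test,
				unsigned long new)
	{
		register unsigned long r8  asm("gr8")  = (unsigned long) ptr;
		register unsigned long r9  asm("gr9")  = test;
		register unsigned long r10 asm("gr10") = new;
		register unsigned long r5  asm("gr5");

		asm volatile("tira gr0,#120"	/* enters __entry_atomic_op */
			     : "=r"(r5), "+r"(r9)
			     : "r"(r8), "r"(r10)
			     : "memory");

		return r5;	/* original value; equals 'test' on success */
	}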

Authored by David Howells, committed by Linus Torvalds (e31c243f, 0c93d8e4)

+268 -1
+7 -1
arch/frv/kernel/entry-table.S
@@ -316,8 +316,14 @@
 	.section .trap.vector
 	.org	TBR_TT_TRAP0 >> 2
 	.long	system_call
-	.rept	126
+	.rept	119
 	.long	__entry_unsupported_trap
 	.endr
+
+	# userspace atomic op emulation, traps 120-126
+	.rept	7
+	.long	__entry_atomic_op
+	.endr
+
 	.org	TBR_TT_BREAK >> 2
 	.long	__entry_debug_exception
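(The slot arithmetic still adds up: the trap-0 system call entry is followed by 119 unsupported slots for traps 1-119 plus 7 __entry_atomic_op slots for traps 120-126, replacing the previous single run of 126 unsupported slots.)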
+20
arch/frv/kernel/entry.S
@@ -656,6 +656,26 @@
 
 ###############################################################################
 #
+# handle atomic operation emulation for userspace
+#
+###############################################################################
+	.globl		__entry_atomic_op
+__entry_atomic_op:
+	LEDS		0x6012
+	sethi.p		%hi(atomic_operation),gr5
+	setlo		%lo(atomic_operation),gr5
+	movsg		esfr1,gr8
+	movsg		epcr0,gr9
+	movsg		esr0,gr10
+
+	# now that we've accessed the exception regs, we can enable exceptions
+	movsg		psr,gr4
+	ori		gr4,#PSR_ET,gr4
+	movgs		gr4,psr
+	jmpl		@(gr5,gr0)	; call atomic_operation(esfr1,epcr0,esr0)
+
+###############################################################################
+#
 # handle media exception
 #
 ###############################################################################
+227
arch/frv/kernel/traps.c
@@ -102,6 +102,233 @@
 
 /*****************************************************************************/
 /*
+ * handle atomic operations with errors
+ * - arguments in gr8, gr9, gr10
+ * - original memory value placed in gr5
+ * - replacement memory value placed in gr9
+ */
+asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
+				 unsigned long esr0)
+{
+	static DEFINE_SPINLOCK(atomic_op_lock);
+	unsigned long x, y, z, *p;
+	mm_segment_t oldfs;
+	siginfo_t info;
+	int ret;
+
+	y = 0;
+	z = 0;
+
+	oldfs = get_fs();
+	if (!user_mode(__frame))
+		set_fs(KERNEL_DS);
+
+	switch (__frame->tbr & TBR_TT) {
+		/* TIRA gr0,#120
+		 * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
+		 */
+	case TBR_TT_ATOMIC_CMPXCHG32:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+		y = __frame->gr10;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			if (z != x)
+				goto done;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (z != x)
+					goto done2;
+
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#121
+		 * u32 __atomic_kernel_xchg32(void *v, u32 new)
+		 */
+	case TBR_TT_ATOMIC_XCHG32:
+		p = (unsigned long *) __frame->gr8;
+		y = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#122
+		 * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_XOR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x ^ z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#123
+		 * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_OR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x | z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#124
+		 * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_AND:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x & z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#125
+		 * int __atomic_user_sub_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_SUB:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z - x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#126
+		 * int __atomic_user_add_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_ADD:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z + x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+	default:
+		BUG();
+	}
+
+done2:
+	spin_unlock_irq(&atomic_op_lock);
+done:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->gr5 = z;
+	__frame->gr9 = y;
+	return;
+
+error2:
+	spin_unlock_irq(&atomic_op_lock);
+error:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
+	__frame->pc -= 4;
+
+	die_if_kernel("-- Atomic Op Error --\n");
+
+	info.si_signo = SIGSEGV;
+	info.si_code = SEGV_ACCERR;
+	info.si_errno = 0;
+	info.si_addr = (void *) __frame->pc;
+
+	force_sig_info(info.si_signo, &info, current);
+}
+
+/*****************************************************************************/
+/*
  *
  */
 asmlinkage void media_exception(unsigned long msr0, unsigned long msr1)
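For illustration, a userspace library could then build read-modify-write primitives on top of the cmpxchg emulation with the usual retry loop; this sketch reuses the hypothetical wrapper from the commit-message section above and is not part of the patch:

	/* hypothetical: atomic increment built on the trap-120 emulation */
	static inline unsigned long frv_atomic_inc32(unsigned long *counter)
	{
		unsigned long old;

		do {
			old = *counter;
			/* the handler hands back the original value; if it
			 * no longer equals 'old', another task raced us */
		} while (__atomic_user_cmpxchg32(counter, old, old + 1) != old);

		return old + 1;
	}

Note that the handler takes atomic_op_lock with IRQs disabled around the re-read and write-back, so concurrent traps on the same word serialise inside the kernel; the userspace retry loop only has to cope with the compare failing.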
+14
include/asm-frv/spr-regs.h
@@ -99,8 +99,22 @@
 #define TBR_TT_TRAP1		(0x81 << 4)
 #define TBR_TT_TRAP2		(0x82 << 4)
 #define TBR_TT_TRAP3		(0x83 << 4)
+#define TBR_TT_TRAP120		(0xf8 << 4)
+#define TBR_TT_TRAP121		(0xf9 << 4)
+#define TBR_TT_TRAP122		(0xfa << 4)
+#define TBR_TT_TRAP123		(0xfb << 4)
+#define TBR_TT_TRAP124		(0xfc << 4)
+#define TBR_TT_TRAP125		(0xfd << 4)
 #define TBR_TT_TRAP126		(0xfe << 4)
 #define TBR_TT_BREAK		(0xff << 4)
+
+#define TBR_TT_ATOMIC_CMPXCHG32	TBR_TT_TRAP120
+#define TBR_TT_ATOMIC_XCHG32	TBR_TT_TRAP121
+#define TBR_TT_ATOMIC_XOR	TBR_TT_TRAP122
+#define TBR_TT_ATOMIC_OR	TBR_TT_TRAP123
+#define TBR_TT_ATOMIC_AND	TBR_TT_TRAP124
+#define TBR_TT_ATOMIC_SUB	TBR_TT_TRAP125
+#define TBR_TT_ATOMIC_ADD	TBR_TT_TRAP126
 
 #define __get_TBR() ({ unsigned long x; asm volatile("movsg tbr,%0" : "=r"(x)); x; })
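The new constants follow mechanically from the encoding already used for TBR_TT_TRAP0-TBR_TT_TRAP3: software trap n is vector entry 0x80 + n, shifted into the TT field. A generic form (hypothetical, not in the header) makes the pattern explicit:

	/* hypothetical equivalent of the TBR_TT_TRAPnn defines above:
	 * software trap n occupies vector 0x80 + n; TT is that index << 4 */
	#define TBR_TT_TRAP(n)		((0x80 + (n)) << 4)

	/* e.g. TBR_TT_TRAP(120) == 0xf8 << 4 == TBR_TT_TRAP120 */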