
[S390] Inline assembly cleanup.

Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc
is used. That results in slightly better code.

Thanks to Christian Borntraeger for proofreading the changes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
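
For readers skimming the hunks below, the common style the commit converges on combines two ingredients: register asm variables, which bind C values to fixed hardware registers and so replace the hand-written lr/lgr register shuffling, and the EX_TABLE macro, which replaces the open-coded ".section __ex_table" blocks and their 31/64-bit #ifdefs. A minimal sketch of the resulting pattern, modeled on the diag308() conversion in arch/s390/kernel/ipl.c below (EX_TABLE's definition lives in the s390 headers, not in these hunks; it is assumed to emit the same fault-address/fixup-address entry the removed sections spelled out):

    static inline int diag308_style(unsigned long subcode, void *addr)
    {
    	/* register asm variables pin the operands to registers 0 and 1 */
    	register unsigned long _addr asm("0") = (unsigned long) addr;
    	register unsigned long _rc asm("1") = 0;

    	asm volatile(
    		"	diag	%0,%2,0x308\n"
    		"0:\n"
    		EX_TABLE(0b,0b)	/* a fault in the diag resumes at label 0 */
    		: "+d" (_addr), "+d" (_rc)
    		: "d" (subcode) : "cc", "memory");
    	return _rc;
    }

The Q-constraint mentioned above belongs to the atomic ops, bit ops and locking headers, which are not among the hunks shown here. Illustratively (a sketch, not the kernel's actual macro), it lets a newer gcc address the counter with a single base-plus-displacement memory operand instead of going through an address register:

    static inline void atomic_add_sketch(int i, atomic_t *v)
    {
    	int old_val, new_val;

    	asm volatile(
    		"	l	%0,%2\n"
    		"0:	lr	%1,%0\n"
    		"	ar	%1,%3\n"
    		"	cs	%0,%1,%2\n"	/* compare-and-swap loop */
    		"	jl	0b"
    		: "=&d" (old_val), "=&d" (new_val), "+Q" (v->counter)
    		: "d" (i) : "cc", "memory");
    }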

+1758 -2268
+65 -139
arch/s390/crypto/crypt_s390.h
··· 105 105 }; 106 106 107 107 /* 108 - * Standard fixup and ex_table sections for crypt_s390 inline functions. 109 - * label 0: the s390 crypto operation 110 - * label 1: just after 1 to catch illegal operation exception 111 - * (unsupported model) 112 - * label 6: the return point after fixup 113 - * label 7: set error value if exception _in_ crypto operation 114 - * label 8: set error value if illegal operation exception 115 - * [ret] is the variable to receive the error code 116 - * [ERR] is the error code value 117 - */ 118 - #ifndef CONFIG_64BIT 119 - #define __crypt_s390_fixup \ 120 - ".section .fixup,\"ax\" \n" \ 121 - "7: lhi %0,%h[e1] \n" \ 122 - " bras 1,9f \n" \ 123 - " .long 6b \n" \ 124 - "8: lhi %0,%h[e2] \n" \ 125 - " bras 1,9f \n" \ 126 - " .long 6b \n" \ 127 - "9: l 1,0(1) \n" \ 128 - " br 1 \n" \ 129 - ".previous \n" \ 130 - ".section __ex_table,\"a\" \n" \ 131 - " .align 4 \n" \ 132 - " .long 0b,7b \n" \ 133 - " .long 1b,8b \n" \ 134 - ".previous" 135 - #else /* CONFIG_64BIT */ 136 - #define __crypt_s390_fixup \ 137 - ".section .fixup,\"ax\" \n" \ 138 - "7: lhi %0,%h[e1] \n" \ 139 - " jg 6b \n" \ 140 - "8: lhi %0,%h[e2] \n" \ 141 - " jg 6b \n" \ 142 - ".previous\n" \ 143 - ".section __ex_table,\"a\" \n" \ 144 - " .align 8 \n" \ 145 - " .quad 0b,7b \n" \ 146 - " .quad 1b,8b \n" \ 147 - ".previous" 148 - #endif /* CONFIG_64BIT */ 149 - 150 - /* 151 - * Standard code for setting the result of s390 crypto instructions. 152 - * %0: the register which will receive the result 153 - * [result]: the register containing the result (e.g. second operand length 154 - * to compute number of processed bytes]. 155 - */ 156 - #ifndef CONFIG_64BIT 157 - #define __crypt_s390_set_result \ 158 - " lr %0,%[result] \n" 159 - #else /* CONFIG_64BIT */ 160 - #define __crypt_s390_set_result \ 161 - " lgr %0,%[result] \n" 162 - #endif 163 - 164 - /* 165 108 * Executes the KM (CIPHER MESSAGE) operation of the CPU. 166 109 * @param func: the function code passed to KM; see crypt_s390_km_func 167 110 * @param param: address of parameter block; see POP for details on each func ··· 119 176 { 120 177 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 121 178 register void* __param asm("1") = param; 122 - register u8* __dest asm("4") = dest; 123 179 register const u8* __src asm("2") = src; 124 180 register long __src_len asm("3") = src_len; 181 + register u8* __dest asm("4") = dest; 125 182 int ret; 126 183 127 - ret = 0; 128 - __asm__ __volatile__ ( 129 - "0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */ 184 + asm volatile( 185 + "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ 130 186 "1: brc 1,0b \n" /* handle partial completion */ 131 - __crypt_s390_set_result 132 - "6: \n" 133 - __crypt_s390_fixup 134 - : "+d" (ret), "+a" (__dest), "+a" (__src), 135 - [result] "+d" (__src_len) 136 - : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 137 - "a" (__param) 138 - : "cc", "memory" 139 - ); 140 - if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ 141 - ret = src_len - ret; 142 - } 143 - return ret; 187 + " ahi %0,%h7\n" 188 + "2: ahi %0,%h8\n" 189 + "3:\n" 190 + EX_TABLE(0b,3b) EX_TABLE(1b,2b) 191 + : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 192 + : "d" (__func), "a" (__param), "0" (-EFAULT), 193 + "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); 194 + if (ret < 0) 195 + return ret; 196 + return (func & CRYPT_S390_FUNC_MASK) ? 
src_len - __src_len : __src_len; 144 197 } 145 198 146 199 /* ··· 154 215 { 155 216 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 156 217 register void* __param asm("1") = param; 157 - register u8* __dest asm("4") = dest; 158 218 register const u8* __src asm("2") = src; 159 219 register long __src_len asm("3") = src_len; 220 + register u8* __dest asm("4") = dest; 160 221 int ret; 161 222 162 - ret = 0; 163 - __asm__ __volatile__ ( 164 - "0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */ 223 + asm volatile( 224 + "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ 165 225 "1: brc 1,0b \n" /* handle partial completion */ 166 - __crypt_s390_set_result 167 - "6: \n" 168 - __crypt_s390_fixup 169 - : "+d" (ret), "+a" (__dest), "+a" (__src), 170 - [result] "+d" (__src_len) 171 - : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 172 - "a" (__param) 173 - : "cc", "memory" 174 - ); 175 - if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ 176 - ret = src_len - ret; 177 - } 178 - return ret; 226 + " ahi %0,%h7\n" 227 + "2: ahi %0,%h8\n" 228 + "3:\n" 229 + EX_TABLE(0b,3b) EX_TABLE(1b,2b) 230 + : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 231 + : "d" (__func), "a" (__param), "0" (-EFAULT), 232 + "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); 233 + if (ret < 0) 234 + return ret; 235 + return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 179 236 } 180 237 181 238 /* ··· 193 258 register long __src_len asm("3") = src_len; 194 259 int ret; 195 260 196 - ret = 0; 197 - __asm__ __volatile__ ( 198 - "0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */ 199 - "1: brc 1,0b \n" /* handle partical completion */ 200 - __crypt_s390_set_result 201 - "6: \n" 202 - __crypt_s390_fixup 203 - : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 204 - : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 205 - "a" (__param) 206 - : "cc", "memory" 207 - ); 208 - if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){ 209 - ret = src_len - ret; 210 - } 211 - return ret; 261 + asm volatile( 262 + "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ 263 + "1: brc 1,0b \n" /* handle partial completion */ 264 + " ahi %0,%h6\n" 265 + "2: ahi %0,%h7\n" 266 + "3:\n" 267 + EX_TABLE(0b,3b) EX_TABLE(1b,2b) 268 + : "=d" (ret), "+a" (__src), "+d" (__src_len) 269 + : "d" (__func), "a" (__param), "0" (-EFAULT), 270 + "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); 271 + if (ret < 0) 272 + return ret; 273 + return (func & CRYPT_S390_FUNC_MASK) ? 
src_len - __src_len : __src_len; 212 274 } 213 275 214 276 /* ··· 226 294 register long __src_len asm("3") = src_len; 227 295 int ret; 228 296 229 - ret = 0; 230 - __asm__ __volatile__ ( 231 - "0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */ 232 - "1: brc 1,0b \n" /* handle partical completion */ 233 - __crypt_s390_set_result 234 - "6: \n" 235 - __crypt_s390_fixup 236 - : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 237 - : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 238 - "a" (__param) 239 - : "cc", "memory" 240 - ); 241 - if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ 242 - ret = src_len - ret; 243 - } 244 - return ret; 297 + asm volatile( 298 + "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ 299 + "1: brc 1,0b \n" /* handle partial completion */ 300 + " ahi %0,%h6\n" 301 + "2: ahi %0,%h7\n" 302 + "3:\n" 303 + EX_TABLE(0b,3b) EX_TABLE(1b,2b) 304 + : "=d" (ret), "+a" (__src), "+d" (__src_len) 305 + : "d" (__func), "a" (__param), "0" (-EFAULT), 306 + "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); 307 + if (ret < 0) 308 + return ret; 309 + return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 245 310 } 246 311 247 312 /* ··· 260 331 register long __src_len asm("3") = src_len; 261 332 int ret; 262 333 263 - ret = 0; 264 - __asm__ __volatile__ ( 265 - "0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */ 266 - "1: brc 1,0b \n" /* handle partical completion */ 267 - __crypt_s390_set_result 268 - "6: \n" 269 - __crypt_s390_fixup 270 - : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 271 - : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 272 - "a" (__param) 273 - : "cc", "memory" 274 - ); 275 - if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){ 276 - ret = src_len - ret; 277 - } 278 - return ret; 334 + asm volatile( 335 + "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ 336 + "1: brc 1,0b \n" /* handle partial completion */ 337 + " ahi %0,%h6\n" 338 + "2: ahi %0,%h7\n" 339 + "3:\n" 340 + EX_TABLE(0b,3b) EX_TABLE(1b,2b) 341 + : "=d" (ret), "+a" (__src), "+d" (__src_len) 342 + : "d" (__func), "a" (__param), "0" (-EFAULT), 343 + "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); 344 + if (ret < 0) 345 + return ret; 346 + return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 279 347 } 280 348 281 349 /**
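
A note on the error handling in the crypt_s390 conversions above, since it is compact to the point of being cryptic: the output register starts out holding -EFAULT (the "0" (-EFAULT) constraint ties it to the same register as ret), and the two ahi (add halfword immediate) instructions adjust it depending on where execution resumes. My reading of the three paths, not text from the commit:

    /*
     * ret is preloaded with -EFAULT; EFAULT is 14, ENOSYS is 38.
     *
     * normal completion - falls through both ahi instructions:
     *     ret = -EFAULT + ENOSYS + (-ENOSYS + EFAULT) = 0
     *
     * access exception on the crypto instruction at label 0 -
     * EX_TABLE(0b,3b) resumes at label 3, skipping both ahi:
     *     ret = -EFAULT
     *
     * illegal operation (unsupported function) - the fixup address is
     * just past the instruction, as the removed comment block noted,
     * so EX_TABLE(1b,2b) resumes at label 2 and only the second ahi
     * runs:
     *     ret = -EFAULT + (-ENOSYS + EFAULT) = -ENOSYS
     */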
+7 -16
arch/s390/hypfs/hypfs_diag.c
··· 333 333 register unsigned long _subcode asm("0") = subcode; 334 334 register unsigned long _size asm("1") = size; 335 335 336 - asm volatile (" diag %2,%0,0x204\n" 337 - "0: \n" ".section __ex_table,\"a\"\n" 338 - #ifndef __s390x__ 339 - " .align 4\n" 340 - " .long 0b,0b\n" 341 - #else 342 - " .align 8\n" 343 - " .quad 0b,0b\n" 344 - #endif 345 - ".previous":"+d" (_subcode), "+d"(_size) 346 - :"d"(addr) 347 - :"memory"); 336 + asm volatile( 337 + " diag %2,%0,0x204\n" 338 + "0:\n" 339 + EX_TABLE(0b,0b) 340 + : "+d" (_subcode), "+d" (_size) : "d" (addr) : "memory"); 348 341 if (_subcode) 349 342 return -1; 350 - else 351 - return _size; 343 + return _size; 352 344 } 353 345 354 346 /* ··· 483 491 484 492 static void diag224(void *ptr) 485 493 { 486 - asm volatile(" diag %0,%1,0x224\n" 487 - : :"d" (0), "d"(ptr) : "memory"); 494 + asm volatile("diag %0,%1,0x224" : :"d" (0), "d"(ptr) : "memory"); 488 495 } 489 496 490 497 static int diag224_get_name_table(void)
+1 -4
arch/s390/kernel/compat_linux.c
··· 544 544 current->ptrace &= ~PT_DTRACE; 545 545 task_unlock(current); 546 546 current->thread.fp_regs.fpc=0; 547 - __asm__ __volatile__ 548 - ("sr 0,0\n\t" 549 - "sfpc 0,0\n\t" 550 - : : :"0"); 547 + asm volatile("sfpc %0,0" : : "d" (0)); 551 548 } 552 549 putname(filename); 553 550 out:
+30 -53
arch/s390/kernel/cpcmd.c
··· 25 25 */ 26 26 int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) 27 27 { 28 - const int mask = 0x40000000L; 29 - unsigned long flags; 30 - int return_code; 31 - int return_len; 32 - int cmdlen; 28 + unsigned long flags, cmdlen; 29 + int return_code, return_len; 33 30 34 31 spin_lock_irqsave(&cpcmd_lock, flags); 35 32 cmdlen = strlen(cmd); ··· 35 38 ASCEBC(cpcmd_buf, cmdlen); 36 39 37 40 if (response != NULL && rlen > 0) { 41 + register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; 42 + register unsigned long reg3 asm ("3") = (addr_t) response; 43 + register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L; 44 + register unsigned long reg5 asm ("5") = rlen; 45 + 38 46 memset(response, 0, rlen); 47 + asm volatile( 39 48 #ifndef CONFIG_64BIT 40 - asm volatile ( "lra 2,0(%2)\n" 41 - "lr 4,%3\n" 42 - "o 4,%6\n" 43 - "lra 3,0(%4)\n" 44 - "lr 5,%5\n" 45 - "diag 2,4,0x8\n" 46 - "brc 8, 1f\n" 47 - "ar 5, %5\n" 48 - "1: \n" 49 - "lr %0,4\n" 50 - "lr %1,5\n" 51 - : "=d" (return_code), "=d" (return_len) 52 - : "a" (cpcmd_buf), "d" (cmdlen), 53 - "a" (response), "d" (rlen), "m" (mask) 54 - : "cc", "2", "3", "4", "5" ); 49 + " diag %2,%0,0x8\n" 50 + " brc 8,1f\n" 51 + " ar %1,%4\n" 55 52 #else /* CONFIG_64BIT */ 56 - asm volatile ( "lrag 2,0(%2)\n" 57 - "lgr 4,%3\n" 58 - "o 4,%6\n" 59 - "lrag 3,0(%4)\n" 60 - "lgr 5,%5\n" 61 - "sam31\n" 62 - "diag 2,4,0x8\n" 63 - "sam64\n" 64 - "brc 8, 1f\n" 65 - "agr 5, %5\n" 66 - "1: \n" 67 - "lgr %0,4\n" 68 - "lgr %1,5\n" 69 - : "=d" (return_code), "=d" (return_len) 70 - : "a" (cpcmd_buf), "d" (cmdlen), 71 - "a" (response), "d" (rlen), "m" (mask) 72 - : "cc", "2", "3", "4", "5" ); 53 + " sam31\n" 54 + " diag %2,%0,0x8\n" 55 + " sam64\n" 56 + " brc 8,1f\n" 57 + " agr %1,%4\n" 73 58 #endif /* CONFIG_64BIT */ 59 + "1:\n" 60 + : "+d" (reg4), "+d" (reg5) 61 + : "d" (reg2), "d" (reg3), "d" (rlen) : "cc"); 62 + return_code = (int) reg4; 63 + return_len = (int) reg5; 74 64 EBCASC(response, rlen); 75 65 } else { 66 + register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf; 67 + register unsigned long reg3 asm ("3") = cmdlen; 76 68 return_len = 0; 69 + asm volatile( 77 70 #ifndef CONFIG_64BIT 78 - asm volatile ( "lra 2,0(%1)\n" 79 - "lr 3,%2\n" 80 - "diag 2,3,0x8\n" 81 - "lr %0,3\n" 82 - : "=d" (return_code) 83 - : "a" (cpcmd_buf), "d" (cmdlen) 84 - : "2", "3" ); 71 + " diag %1,%0,0x8\n" 85 72 #else /* CONFIG_64BIT */ 86 - asm volatile ( "lrag 2,0(%1)\n" 87 - "lgr 3,%2\n" 88 - "sam31\n" 89 - "diag 2,3,0x8\n" 90 - "sam64\n" 91 - "lgr %0,3\n" 92 - : "=d" (return_code) 93 - : "a" (cpcmd_buf), "d" (cmdlen) 94 - : "2", "3" ); 73 + " sam31\n" 74 + " diag %1,%0,0x8\n" 75 + " sam64\n" 95 76 #endif /* CONFIG_64BIT */ 77 + : "+d" (reg3) : "d" (reg2) : "cc"); 78 + return_code = (int) reg3; 96 79 } 97 80 spin_unlock_irqrestore(&cpcmd_lock, flags); 98 81 if (response_code != NULL)
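
As a usage illustration for the rewritten __cpcmd() (a hypothetical caller, not part of this commit; "QUERY USERID" is just an example CP command): the function converts the command to EBCDIC, issues diagnose 8, and converts the response back to ASCII.

    static void cpcmd_example(void)
    {
    	char response[128];
    	int cprc;

    	/* error handling omitted for brevity */
    	__cpcmd("QUERY USERID", response, sizeof(response), &cprc);
    	/* response holds the ASCII-converted CP output, cprc the
    	   return code from the diagnose */
    }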
+6 -15
arch/s390/kernel/ipl.c
··· 120 120 121 121 static int diag308(unsigned long subcode, void *addr) 122 122 { 123 - register unsigned long _addr asm("0") = (unsigned long)addr; 123 + register unsigned long _addr asm("0") = (unsigned long) addr; 124 124 register unsigned long _rc asm("1") = 0; 125 125 126 - asm volatile ( 127 - " diag %0,%2,0x308\n" 128 - "0: \n" 129 - ".section __ex_table,\"a\"\n" 130 - #ifdef CONFIG_64BIT 131 - " .align 8\n" 132 - " .quad 0b, 0b\n" 133 - #else 134 - " .align 4\n" 135 - " .long 0b, 0b\n" 136 - #endif 137 - ".previous\n" 126 + asm volatile( 127 + " diag %0,%2,0x308\n" 128 + "0:\n" 129 + EX_TABLE(0b,0b) 138 130 : "+d" (_addr), "+d" (_rc) 139 - : "d" (subcode) : "cc", "memory" ); 140 - 131 + : "d" (subcode) : "cc", "memory"); 141 132 return _rc; 142 133 } 143 134
+3 -2
arch/s390/kernel/process.c
··· 45 45 #include <asm/irq.h> 46 46 #include <asm/timer.h> 47 47 48 - asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 48 + asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 49 49 50 50 /* 51 51 * Return saved PC of a blocked thread. used in kernel/sched. ··· 177 177 178 178 extern void kernel_thread_starter(void); 179 179 180 - __asm__(".align 4\n" 180 + asm( 181 + ".align 4\n" 181 182 "kernel_thread_starter:\n" 182 183 " la 2,0(10)\n" 183 184 " basr 14,9\n"
+11 -11
arch/s390/kernel/semaphore.c
··· 26 26 { 27 27 int old_val, new_val; 28 28 29 - __asm__ __volatile__(" l %0,0(%3)\n" 30 - "0: ltr %1,%0\n" 31 - " jhe 1f\n" 32 - " lhi %1,0\n" 33 - "1: ar %1,%4\n" 34 - " cs %0,%1,0(%3)\n" 35 - " jl 0b\n" 36 - : "=&d" (old_val), "=&d" (new_val), 37 - "=m" (sem->count) 38 - : "a" (&sem->count), "d" (incr), "m" (sem->count) 39 - : "cc" ); 29 + asm volatile( 30 + " l %0,0(%3)\n" 31 + "0: ltr %1,%0\n" 32 + " jhe 1f\n" 33 + " lhi %1,0\n" 34 + "1: ar %1,%4\n" 35 + " cs %0,%1,0(%3)\n" 36 + " jl 0b\n" 37 + : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count) 38 + : "a" (&sem->count), "d" (incr), "m" (sem->count) 39 + : "cc"); 40 40 return old_val; 41 41 } 42 42
+1 -1
arch/s390/kernel/setup.c
··· 101 101 /* 102 102 * Store processor id in lowcore (used e.g. in timer_interrupt) 103 103 */ 104 - asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id)); 104 + asm volatile("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id)); 105 105 S390_lowcore.cpu_data.cpu_addr = addr; 106 106 107 107 /*
+27 -46
arch/s390/kernel/smp.c
··· 63 63 static void smp_ext_bitcall_others(ec_bit_sig); 64 64 65 65 /* 66 - * Structure and data for smp_call_function(). This is designed to minimise 66 + 5B * Structure and data for smp_call_function(). This is designed to minimise 67 67 * static memory requirements. It also looks cleaner. 68 68 */ 69 69 static DEFINE_SPINLOCK(call_lock); ··· 418 418 /* 419 419 * parameter area for the set/clear control bit callbacks 420 420 */ 421 - typedef struct 422 - { 423 - __u16 start_ctl; 424 - __u16 end_ctl; 421 + struct ec_creg_mask_parms { 425 422 unsigned long orvals[16]; 426 423 unsigned long andvals[16]; 427 - } ec_creg_mask_parms; 424 + }; 428 425 429 426 /* 430 427 * callback for setting/clearing control bits 431 428 */ 432 429 void smp_ctl_bit_callback(void *info) { 433 - ec_creg_mask_parms *pp; 430 + struct ec_creg_mask_parms *pp = info; 434 431 unsigned long cregs[16]; 435 432 int i; 436 433 437 - pp = (ec_creg_mask_parms *) info; 438 - __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl); 439 - for (i = pp->start_ctl; i <= pp->end_ctl; i++) 434 + __ctl_store(cregs, 0, 15); 435 + for (i = 0; i <= 15; i++) 440 436 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; 441 - __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl); 437 + __ctl_load(cregs, 0, 15); 442 438 } 443 439 444 440 /* 445 441 * Set a bit in a control register of all cpus 446 442 */ 447 - void smp_ctl_set_bit(int cr, int bit) { 448 - ec_creg_mask_parms parms; 443 + void smp_ctl_set_bit(int cr, int bit) 444 + { 445 + struct ec_creg_mask_parms parms; 449 446 450 - parms.start_ctl = cr; 451 - parms.end_ctl = cr; 447 + memset(&parms.orvals, 0, sizeof(parms.orvals)); 448 + memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 452 449 parms.orvals[cr] = 1 << bit; 453 - parms.andvals[cr] = -1L; 454 - preempt_disable(); 455 - smp_call_function(smp_ctl_bit_callback, &parms, 0, 1); 456 - __ctl_set_bit(cr, bit); 457 - preempt_enable(); 450 + on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); 458 451 } 459 452 460 453 /* 461 454 * Clear a bit in a control register of all cpus 462 455 */ 463 - void smp_ctl_clear_bit(int cr, int bit) { 464 - ec_creg_mask_parms parms; 456 + void smp_ctl_clear_bit(int cr, int bit) 457 + { 458 + struct ec_creg_mask_parms parms; 465 459 466 - parms.start_ctl = cr; 467 - parms.end_ctl = cr; 468 - parms.orvals[cr] = 0; 460 + memset(&parms.orvals, 0, sizeof(parms.orvals)); 461 + memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 469 462 parms.andvals[cr] = ~(1L << bit); 470 - preempt_disable(); 471 - smp_call_function(smp_ctl_bit_callback, &parms, 0, 1); 472 - __ctl_clear_bit(cr, bit); 473 - preempt_enable(); 463 + on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); 474 464 } 475 465 476 466 /* ··· 640 650 sf->gprs[9] = (unsigned long) sf; 641 651 cpu_lowcore->save_area[15] = (unsigned long) sf; 642 652 __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15); 643 - __asm__ __volatile__("stam 0,15,0(%0)" 644 - : : "a" (&cpu_lowcore->access_regs_save_area) 645 - : "memory"); 653 + asm volatile( 654 + " stam 0,15,0(%0)" 655 + : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); 646 656 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 647 657 cpu_lowcore->current_task = (unsigned long) idle; 648 658 cpu_lowcore->cpu_data.cpu_nr = cpu; ··· 698 708 __cpu_disable(void) 699 709 { 700 710 unsigned long flags; 701 - ec_creg_mask_parms cr_parms; 711 + struct ec_creg_mask_parms cr_parms; 702 712 int cpu = smp_processor_id(); 703 713 704 714 spin_lock_irqsave(&smp_reserve_lock, flags); ··· 714 724 
pfault_fini(); 715 725 #endif 716 726 717 - /* disable all external interrupts */ 727 + memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); 728 + memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); 718 729 719 - cr_parms.start_ctl = 0; 720 - cr_parms.end_ctl = 0; 730 + /* disable all external interrupts */ 721 731 cr_parms.orvals[0] = 0; 722 732 cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | 723 733 1<<11 | 1<<10 | 1<< 6 | 1<< 4); 724 - smp_ctl_bit_callback(&cr_parms); 725 - 726 734 /* disable all I/O interrupts */ 727 - 728 - cr_parms.start_ctl = 6; 729 - cr_parms.end_ctl = 6; 730 735 cr_parms.orvals[6] = 0; 731 736 cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | 732 737 1<<27 | 1<<26 | 1<<25 | 1<<24); 733 - smp_ctl_bit_callback(&cr_parms); 734 - 735 738 /* disable most machine checks */ 736 - 737 - cr_parms.start_ctl = 14; 738 - cr_parms.end_ctl = 14; 739 739 cr_parms.orvals[14] = 0; 740 740 cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); 741 + 741 742 smp_ctl_bit_callback(&cr_parms); 742 743 743 744 spin_unlock_irqrestore(&smp_reserve_lock, flags);
+6 -4
arch/s390/kernel/time.c
··· 351 351 int cc; 352 352 353 353 /* kick the TOD clock */ 354 - asm volatile ("STCK 0(%1)\n\t" 355 - "IPM %0\n\t" 356 - "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc) 357 - : "memory", "cc"); 354 + asm volatile( 355 + " stck 0(%2)\n" 356 + " ipm %0\n" 357 + " srl %0,28" 358 + : "=d" (cc), "=m" (init_timer_cc) 359 + : "a" (&init_timer_cc) : "cc"); 358 360 switch (cc) { 359 361 case 0: /* clock in set state: all is fine */ 360 362 break;
+1 -2
arch/s390/kernel/traps.c
··· 597 597 local_irq_enable(); 598 598 599 599 if (MACHINE_HAS_IEEE) 600 - __asm__ volatile ("stfpc %0\n\t" 601 - : "=m" (current->thread.fp_regs.fpc)); 600 + asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 602 601 603 602 #ifdef CONFIG_MATHEMU 604 603 else if (regs->psw.mask & PSW_MASK_PSTATE) {
+4 -7
arch/s390/lib/delay.c
··· 27 27 * yield the megahertz number of the cpu. The important function 28 28 * is udelay and that is done using the tod clock. -- martin. 29 29 */ 30 - __asm__ __volatile__( 31 - "0: brct %0,0b" 32 - : /* no outputs */ : "r" ((loops/2) + 1)); 30 + asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); 33 31 } 34 32 35 33 /* ··· 36 38 */ 37 39 void __udelay(unsigned long usecs) 38 40 { 39 - uint64_t start_cc, end_cc; 41 + uint64_t start_cc; 40 42 41 43 if (usecs == 0) 42 44 return; 43 - asm volatile ("STCK %0" : "=m" (start_cc)); 45 + start_cc = get_clock(); 44 46 do { 45 47 cpu_relax(); 46 - asm volatile ("STCK %0" : "=m" (end_cc)); 47 - } while (((end_cc - start_cc)/4096) < usecs); 48 + } while (((get_clock() - start_cc)/4096) < usecs); 48 49 }
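
The magic 4096 in the rewritten __udelay() comes from the TOD clock format: bit 51 of the 64-bit TOD value increments once per microsecond, so 4096 TOD units equal one microsecond and a TOD delta divided by 4096 is a microsecond count. A small helper to make that explicit (hypothetical name, not from the commit):

    /* convert a TOD clock delta to microseconds; >> 12 == / 4096 */
    static inline unsigned long tod_delta_to_usecs(uint64_t start, uint64_t end)
    {
    	return (end - start) >> 12;
    }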
+62 -64
arch/s390/math-emu/math.c
··· 1564 1564 } 1565 1565 1566 1566 static inline void emu_load_regd(int reg) { 1567 - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1567 + if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1568 1568 return; 1569 - asm volatile ( /* load reg from fp_regs.fprs[reg] */ 1570 - " bras 1,0f\n" 1571 - " ld 0,0(%1)\n" 1572 - "0: ex %0,0(1)" 1573 - : /* no output */ 1574 - : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d) 1575 - : "1" ); 1569 + asm volatile( /* load reg from fp_regs.fprs[reg] */ 1570 + " bras 1,0f\n" 1571 + " ld 0,0(%1)\n" 1572 + "0: ex %0,0(1)" 1573 + : /* no output */ 1574 + : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d) 1575 + : "1"); 1576 1576 } 1577 1577 1578 1578 static inline void emu_load_rege(int reg) { 1579 - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1579 + if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1580 1580 return; 1581 - asm volatile ( /* load reg from fp_regs.fprs[reg] */ 1582 - " bras 1,0f\n" 1583 - " le 0,0(%1)\n" 1584 - "0: ex %0,0(1)" 1585 - : /* no output */ 1586 - : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) 1587 - : "1" ); 1581 + asm volatile( /* load reg from fp_regs.fprs[reg] */ 1582 + " bras 1,0f\n" 1583 + " le 0,0(%1)\n" 1584 + "0: ex %0,0(1)" 1585 + : /* no output */ 1586 + : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) 1587 + : "1"); 1588 1588 } 1589 1589 1590 1590 static inline void emu_store_regd(int reg) { 1591 - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1591 + if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1592 1592 return; 1593 - asm volatile ( /* store reg to fp_regs.fprs[reg] */ 1594 - " bras 1,0f\n" 1595 - " std 0,0(%1)\n" 1596 - "0: ex %0,0(1)" 1597 - : /* no output */ 1598 - : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d) 1599 - : "1" ); 1593 + asm volatile( /* store reg to fp_regs.fprs[reg] */ 1594 + " bras 1,0f\n" 1595 + " std 0,0(%1)\n" 1596 + "0: ex %0,0(1)" 1597 + : /* no output */ 1598 + : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d) 1599 + : "1"); 1600 1600 } 1601 1601 1602 1602 1603 1603 static inline void emu_store_rege(int reg) { 1604 - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1604 + if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1605 1605 return; 1606 - asm volatile ( /* store reg to fp_regs.fprs[reg] */ 1607 - " bras 1,0f\n" 1608 - " ste 0,0(%1)\n" 1609 - "0: ex %0,0(1)" 1610 - : /* no output */ 1611 - : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) 1612 - : "1" ); 1606 + asm volatile( /* store reg to fp_regs.fprs[reg] */ 1607 + " bras 1,0f\n" 1608 + " ste 0,0(%1)\n" 1609 + "0: ex %0,0(1)" 1610 + : /* no output */ 1611 + : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) 1612 + : "1"); 1613 1613 } 1614 1614 1615 1615 int math_emu_b3(__u8 *opcode, struct pt_regs * regs) { ··· 2089 2089 2090 2090 if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ 2091 2091 /* we got an exception therfore ry can't be in {0,2,4,6} */ 2092 - __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */ 2093 - " bras 1,0f\n" 2094 - " ld 0,0(%1)\n" 2095 - "0: ex %0,0(1)" 2096 - : /* no output */ 2097 - : "a" (opc & 0xf0), 2098 - "a" (&fp_regs->fprs[opc & 0xf].d) 2099 - : "1" ); 2092 + asm volatile( /* load rx from fp_regs.fprs[ry] */ 2093 + " bras 1,0f\n" 2094 + " ld 0,0(%1)\n" 2095 + "0: ex %0,0(1)" 2096 + : /* no output */ 2097 + : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d) 2098 + : "1"); 2100 2099 } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ 2101 - __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */ 2102 - 
" bras 1,0f\n" 2103 - " std 0,0(%1)\n" 2104 - "0: ex %0,0(1)" 2105 - : /* no output */ 2106 - : "a" ((opc & 0xf) << 4), 2107 - "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d) 2108 - : "1" ); 2100 + asm volatile ( /* store ry to fp_regs.fprs[rx] */ 2101 + " bras 1,0f\n" 2102 + " std 0,0(%1)\n" 2103 + "0: ex %0,0(1)" 2104 + : /* no output */ 2105 + : "a" ((opc & 0xf) << 4), 2106 + "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d) 2107 + : "1"); 2109 2108 } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ 2110 2109 fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; 2111 2110 return 0; ··· 2119 2120 2120 2121 if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ 2121 2122 /* we got an exception therfore ry can't be in {0,2,4,6} */ 2122 - __asm__ __volatile ( /* load rx from fp_regs.fprs[ry] */ 2123 - " bras 1,0f\n" 2124 - " le 0,0(%1)\n" 2125 - "0: ex %0,0(1)" 2126 - : /* no output */ 2127 - : "a" (opc & 0xf0), 2128 - "a" (&fp_regs->fprs[opc & 0xf].f) 2129 - : "1" ); 2123 + asm volatile( /* load rx from fp_regs.fprs[ry] */ 2124 + " bras 1,0f\n" 2125 + " le 0,0(%1)\n" 2126 + "0: ex %0,0(1)" 2127 + : /* no output */ 2128 + : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f) 2129 + : "1"); 2130 2130 } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ 2131 - __asm__ __volatile ( /* store ry to fp_regs.fprs[rx] */ 2132 - " bras 1,0f\n" 2133 - " ste 0,0(%1)\n" 2134 - "0: ex %0,0(1)" 2135 - : /* no output */ 2136 - : "a" ((opc & 0xf) << 4), 2137 - "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f) 2138 - : "1" ); 2131 + asm volatile( /* store ry to fp_regs.fprs[rx] */ 2132 + " bras 1,0f\n" 2133 + " ste 0,0(%1)\n" 2134 + "0: ex %0,0(1)" 2135 + : /* no output */ 2136 + : "a" ((opc & 0xf) << 4), 2137 + "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f) 2138 + : "1"); 2139 2139 } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ 2140 2140 fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; 2141 2141 return 0;
+38 -35
arch/s390/math-emu/sfp-util.h
··· 4 4 #include <asm/byteorder.h> 5 5 6 6 #define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \ 7 - unsigned int __sh = (ah); \ 8 - unsigned int __sl = (al); \ 9 - __asm__ (" alr %1,%3\n" \ 10 - " brc 12,0f\n" \ 11 - " ahi %0,1\n" \ 12 - "0: alr %0,%2" \ 13 - : "+&d" (__sh), "+d" (__sl) \ 14 - : "d" (bh), "d" (bl) : "cc" ); \ 15 - (sh) = __sh; \ 16 - (sl) = __sl; \ 7 + unsigned int __sh = (ah); \ 8 + unsigned int __sl = (al); \ 9 + asm volatile( \ 10 + " alr %1,%3\n" \ 11 + " brc 12,0f\n" \ 12 + " ahi %0,1\n" \ 13 + "0: alr %0,%2" \ 14 + : "+&d" (__sh), "+d" (__sl) \ 15 + : "d" (bh), "d" (bl) : "cc"); \ 16 + (sh) = __sh; \ 17 + (sl) = __sl; \ 17 18 }) 18 19 19 20 #define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \ 20 - unsigned int __sh = (ah); \ 21 - unsigned int __sl = (al); \ 22 - __asm__ (" slr %1,%3\n" \ 23 - " brc 3,0f\n" \ 24 - " ahi %0,-1\n" \ 25 - "0: slr %0,%2" \ 26 - : "+&d" (__sh), "+d" (__sl) \ 27 - : "d" (bh), "d" (bl) : "cc" ); \ 28 - (sh) = __sh; \ 29 - (sl) = __sl; \ 21 + unsigned int __sh = (ah); \ 22 + unsigned int __sl = (al); \ 23 + asm volatile( \ 24 + " slr %1,%3\n" \ 25 + " brc 3,0f\n" \ 26 + " ahi %0,-1\n" \ 27 + "0: slr %0,%2" \ 28 + : "+&d" (__sh), "+d" (__sl) \ 29 + : "d" (bh), "d" (bl) : "cc"); \ 30 + (sh) = __sh; \ 31 + (sl) = __sl; \ 30 32 }) 31 33 32 34 /* a umul b = a mul b + (a>=2<<31) ? b<<32:0 + (b>=2<<31) ? a<<32:0 */ 33 35 #define umul_ppmm(wh, wl, u, v) ({ \ 34 - unsigned int __wh = u; \ 35 - unsigned int __wl = v; \ 36 - __asm__ (" ltr 1,%0\n" \ 37 - " mr 0,%1\n" \ 38 - " jnm 0f\n" \ 39 - " alr 0,%1\n" \ 40 - "0: ltr %1,%1\n" \ 41 - " jnm 1f\n" \ 42 - " alr 0,%0\n" \ 43 - "1: lr %0,0\n" \ 44 - " lr %1,1\n" \ 45 - : "+d" (__wh), "+d" (__wl) \ 46 - : : "0", "1", "cc" ); \ 47 - wh = __wh; \ 48 - wl = __wl; \ 36 + unsigned int __wh = u; \ 37 + unsigned int __wl = v; \ 38 + asm volatile( \ 39 + " ltr 1,%0\n" \ 40 + " mr 0,%1\n" \ 41 + " jnm 0f\n" \ 42 + " alr 0,%1\n" \ 43 + "0: ltr %1,%1\n" \ 44 + " jnm 1f\n" \ 45 + " alr 0,%0\n" \ 46 + "1: lr %0,0\n" \ 47 + " lr %1,1\n" \ 48 + : "+d" (__wh), "+d" (__wl) \ 49 + : : "0", "1", "cc"); \ 50 + wh = __wh; \ 51 + wl = __wl; \ 49 52 }) 50 53 51 54 #define udiv_qrnnd(q, r, n1, n0, d) \
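
For cross-checking the reindented add_ssaaaa() above: alr adds the low words and sets the condition code, and brc 12,0f (branch on condition mask 12, i.e. cc 0 or 1, the no-carry cases) skips the ahi %0,1 carry increment. A plain-C equivalent of the macro, assuming 32-bit unsigned int:

    /* 64-bit addition assembled from 32-bit halves, as add_ssaaaa does */
    static inline void add_ssaaaa_c(unsigned int *sh, unsigned int *sl,
    				unsigned int ah, unsigned int al,
    				unsigned int bh, unsigned int bl)
    {
    	unsigned int lo = al + bl;

    	*sh = ah + bh + (lo < al);	/* +1 iff the low word carried */
    	*sl = lo;
    }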
+8 -8
arch/s390/mm/extmem.c
··· 142 142 143 143 rx = (unsigned long) parameter; 144 144 ry = (unsigned long) func; 145 - __asm__ __volatile__( 145 + asm volatile( 146 146 #ifdef CONFIG_64BIT 147 - " sam31\n" // switch to 31 bit 148 - " diag %0,%1,0x64\n" 149 - " sam64\n" // switch back to 64 bit 147 + " sam31\n" 148 + " diag %0,%1,0x64\n" 149 + " sam64\n" 150 150 #else 151 - " diag %0,%1,0x64\n" 151 + " diag %0,%1,0x64\n" 152 152 #endif 153 - " ipm %2\n" 154 - " srl %2,28\n" 155 - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" ); 153 + " ipm %2\n" 154 + " srl %2,28\n" 155 + : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); 156 156 *ret1 = rx; 157 157 *ret2 = ry; 158 158 return rc;
+10 -24
arch/s390/mm/fault.c
··· 424 424 425 425 if (pfault_disable) 426 426 return -1; 427 - __asm__ __volatile__( 428 - " diag %1,%0,0x258\n" 429 - "0: j 2f\n" 430 - "1: la %0,8\n" 427 + asm volatile( 428 + " diag %1,%0,0x258\n" 429 + "0: j 2f\n" 430 + "1: la %0,8\n" 431 431 "2:\n" 432 - ".section __ex_table,\"a\"\n" 433 - " .align 4\n" 434 - #ifndef CONFIG_64BIT 435 - " .long 0b,1b\n" 436 - #else /* CONFIG_64BIT */ 437 - " .quad 0b,1b\n" 438 - #endif /* CONFIG_64BIT */ 439 - ".previous" 440 - : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" ); 432 + EX_TABLE(0b,1b) 433 + : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); 441 434 __ctl_set_bit(0, 9); 442 435 return rc; 443 436 } ··· 443 450 if (pfault_disable) 444 451 return; 445 452 __ctl_clear_bit(0,9); 446 - __asm__ __volatile__( 447 - " diag %0,0,0x258\n" 453 + asm volatile( 454 + " diag %0,0,0x258\n" 448 455 "0:\n" 449 - ".section __ex_table,\"a\"\n" 450 - " .align 4\n" 451 - #ifndef CONFIG_64BIT 452 - " .long 0b,0b\n" 453 - #else /* CONFIG_64BIT */ 454 - " .quad 0b,0b\n" 455 - #endif /* CONFIG_64BIT */ 456 - ".previous" 457 - : : "a" (&refbk), "m" (refbk) : "cc" ); 456 + EX_TABLE(0b,0b) 457 + : : "a" (&refbk), "m" (refbk) : "cc"); 458 458 } 459 459 460 460 asmlinkage void
+15 -26
arch/s390/mm/init.c
··· 45 45 { 46 46 if (addr >= 0x7ff00000) 47 47 return; 48 + asm volatile( 48 49 #ifdef CONFIG_64BIT 49 - asm volatile ( 50 - " sam31\n" 51 - " diag %0,%0,0x10\n" 52 - "0: sam64\n" 53 - ".section __ex_table,\"a\"\n" 54 - " .align 8\n" 55 - " .quad 0b, 0b\n" 56 - ".previous\n" 57 - : : "a" (addr)); 50 + " sam31\n" 51 + " diag %0,%0,0x10\n" 52 + "0: sam64\n" 58 53 #else 59 - asm volatile ( 60 - " diag %0,%0,0x10\n" 54 + " diag %0,%0,0x10\n" 61 55 "0:\n" 62 - ".section __ex_table,\"a\"\n" 63 - " .align 4\n" 64 - " .long 0b, 0b\n" 65 - ".previous\n" 66 - : : "a" (addr)); 67 56 #endif 57 + EX_TABLE(0b,0b) 58 + : : "a" (addr)); 68 59 } 69 60 70 61 void show_mem(void) ··· 147 156 S390_lowcore.kernel_asce = pgdir_k; 148 157 149 158 /* enable virtual mapping in kernel mode */ 150 - __asm__ __volatile__(" LCTL 1,1,%0\n" 151 - " LCTL 7,7,%0\n" 152 - " LCTL 13,13,%0\n" 153 - " SSM %1" 154 - : : "m" (pgdir_k), "m" (ssm_mask)); 159 + __ctl_load(pgdir_k, 1, 1); 160 + __ctl_load(pgdir_k, 7, 7); 161 + __ctl_load(pgdir_k, 13, 13); 162 + __raw_local_irq_ssm(ssm_mask); 155 163 156 164 local_flush_tlb(); 157 165 return; ··· 231 241 S390_lowcore.kernel_asce = pgdir_k; 232 242 233 243 /* enable virtual mapping in kernel mode */ 234 - __asm__ __volatile__("lctlg 1,1,%0\n\t" 235 - "lctlg 7,7,%0\n\t" 236 - "lctlg 13,13,%0\n\t" 237 - "ssm %1" 238 - : :"m" (pgdir_k), "m" (ssm_mask)); 244 + __ctl_load(pgdir_k, 1, 1); 245 + __ctl_load(pgdir_k, 7, 7); 246 + __ctl_load(pgdir_k, 13, 13); 247 + __raw_local_irq_ssm(ssm_mask); 239 248 240 249 local_flush_tlb(); 241 250
+8 -26
drivers/s390/block/dasd_diag.c
··· 63 63 * and function code cmd. 64 64 * In case of an exception return 3. Otherwise return result of bitwise OR of 65 65 * resulting condition code and DIAG return code. */ 66 - static __inline__ int 67 - dia250(void *iob, int cmd) 66 + static inline int dia250(void *iob, int cmd) 68 67 { 68 + register unsigned long reg0 asm ("0") = (unsigned long) iob; 69 69 typedef union { 70 70 struct dasd_diag_init_io init_io; 71 71 struct dasd_diag_rw_io rw_io; 72 72 } addr_type; 73 73 int rc; 74 74 75 - __asm__ __volatile__( 76 - #ifdef CONFIG_64BIT 77 - " lghi %0,3\n" 78 - " lgr 0,%3\n" 75 + rc = 3; 76 + asm volatile( 79 77 " diag 0,%2,0x250\n" 80 78 "0: ipm %0\n" 81 79 " srl %0,28\n" 82 80 " or %0,1\n" 83 81 "1:\n" 84 - ".section __ex_table,\"a\"\n" 85 - " .align 8\n" 86 - " .quad 0b,1b\n" 87 - ".previous\n" 88 - #else 89 - " lhi %0,3\n" 90 - " lr 0,%3\n" 91 - " diag 0,%2,0x250\n" 92 - "0: ipm %0\n" 93 - " srl %0,28\n" 94 - " or %0,1\n" 95 - "1:\n" 96 - ".section __ex_table,\"a\"\n" 97 - " .align 4\n" 98 - " .long 0b,1b\n" 99 - ".previous\n" 100 - #endif 101 - : "=&d" (rc), "=m" (*(addr_type *) iob) 102 - : "d" (cmd), "d" (iob), "m" (*(addr_type *) iob) 103 - : "0", "1", "cc"); 82 + EX_TABLE(0b,1b) 83 + : "+d" (rc), "=m" (*(addr_type *) iob) 84 + : "d" (cmd), "d" (reg0), "m" (*(addr_type *) iob) 85 + : "1", "cc"); 104 86 return rc; 105 87 } 106 88
+14 -40
drivers/s390/block/xpram.c
··· 89 89 */ 90 90 static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index) 91 91 { 92 - int cc; 92 + int cc = 2; /* return unused cc 2 if pgin traps */ 93 93 94 - __asm__ __volatile__ ( 95 - " lhi %0,2\n" /* return unused cc 2 if pgin traps */ 96 - " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */ 97 - "0: ipm %0\n" 98 - " srl %0,28\n" 94 + asm volatile( 95 + " .insn rre,0xb22e0000,%1,%2\n" /* pgin %1,%2 */ 96 + "0: ipm %0\n" 97 + " srl %0,28\n" 99 98 "1:\n" 100 - #ifndef CONFIG_64BIT 101 - ".section __ex_table,\"a\"\n" 102 - " .align 4\n" 103 - " .long 0b,1b\n" 104 - ".previous" 105 - #else 106 - ".section __ex_table,\"a\"\n" 107 - " .align 8\n" 108 - " .quad 0b,1b\n" 109 - ".previous" 110 - #endif 111 - : "=&d" (cc) 112 - : "a" (__pa(page_addr)), "a" (xpage_index) 113 - : "cc" ); 99 + EX_TABLE(0b,1b) 100 + : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); 114 101 if (cc == 3) 115 102 return -ENXIO; 116 103 if (cc == 2) { ··· 124 137 */ 125 138 static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) 126 139 { 127 - int cc; 140 + int cc = 2; /* return unused cc 2 if pgin traps */ 128 141 129 - __asm__ __volatile__ ( 130 - " lhi %0,2\n" /* return unused cc 2 if pgout traps */ 131 - " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */ 132 - "0: ipm %0\n" 133 - " srl %0,28\n" 142 + asm volatile( 143 + " .insn rre,0xb22f0000,%1,%2\n" /* pgout %1,%2 */ 144 + "0: ipm %0\n" 145 + " srl %0,28\n" 134 146 "1:\n" 135 - #ifndef CONFIG_64BIT 136 - ".section __ex_table,\"a\"\n" 137 - " .align 4\n" 138 - " .long 0b,1b\n" 139 - ".previous" 140 - #else 141 - ".section __ex_table,\"a\"\n" 142 - " .align 8\n" 143 - " .quad 0b,1b\n" 144 - ".previous" 145 - #endif 146 - : "=&d" (cc) 147 - : "a" (__pa(page_addr)), "a" (xpage_index) 148 - : "cc" ); 147 + EX_TABLE(0b,1b) 148 + : "+d" (cc) : "a" (__pa(page_addr)), "d" (xpage_index) : "cc"); 149 149 if (cc == 3) 150 150 return -ENXIO; 151 151 if (cc == 2) {
+9 -22
drivers/s390/char/sclp.c
··· 100 100 { 101 101 int cc; 102 102 103 - __asm__ __volatile__( 104 - " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ 105 - " ipm %0\n" 106 - " srl %0,28" 107 - : "=&d" (cc) 108 - : "d" (command), "a" (__pa(sccb)) 109 - : "cc", "memory" ); 103 + asm volatile( 104 + " .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */ 105 + " ipm %0\n" 106 + " srl %0,28" 107 + : "=&d" (cc) : "d" (command), "a" (__pa(sccb)) 108 + : "cc", "memory"); 110 109 if (cc == 3) 111 110 return -EIO; 112 111 if (cc == 2) ··· 359 360 sclp_process_queue(); 360 361 } 361 362 362 - /* Return current Time-Of-Day clock. */ 363 - static inline u64 364 - sclp_get_clock(void) 365 - { 366 - u64 result; 367 - 368 - asm volatile ("STCK 0(%1)" : "=m" (result) : "a" (&(result)) : "cc"); 369 - return result; 370 - } 371 - 372 363 /* Convert interval in jiffies to TOD ticks. */ 373 364 static inline u64 374 365 sclp_tod_from_jiffies(unsigned long jiffies) ··· 371 382 void 372 383 sclp_sync_wait(void) 373 384 { 374 - unsigned long psw_mask; 375 385 unsigned long flags; 376 386 unsigned long cr0, cr0_sync; 377 387 u64 timeout; ··· 380 392 timeout = 0; 381 393 if (timer_pending(&sclp_request_timer)) { 382 394 /* Get timeout TOD value */ 383 - timeout = sclp_get_clock() + 395 + timeout = get_clock() + 384 396 sclp_tod_from_jiffies(sclp_request_timer.expires - 385 397 jiffies); 386 398 } ··· 394 406 cr0_sync |= 0x00000200; 395 407 cr0_sync &= 0xFFFFF3AC; 396 408 __ctl_load(cr0_sync, 0, 0); 397 - asm volatile ("STOSM 0(%1),0x01" 398 - : "=m" (psw_mask) : "a" (&psw_mask) : "memory"); 409 + __raw_local_irq_stosm(0x01); 399 410 /* Loop until driver state indicates finished request */ 400 411 while (sclp_running_state != sclp_running_state_idle) { 401 412 /* Check for expired request timer */ 402 413 if (timer_pending(&sclp_request_timer) && 403 - sclp_get_clock() > timeout && 414 + get_clock() > timeout && 404 415 del_timer(&sclp_request_timer)) 405 416 sclp_request_timer.function(sclp_request_timer.data); 406 417 barrier();
+12 -40
drivers/s390/char/vmwatchdog.c
··· 54 54 static int __diag288(enum vmwdt_func func, unsigned int timeout, 55 55 char *cmd, size_t len) 56 56 { 57 - register unsigned long __func asm("2"); 58 - register unsigned long __timeout asm("3"); 59 - register unsigned long __cmdp asm("4"); 60 - register unsigned long __cmdl asm("5"); 57 + register unsigned long __func asm("2") = func; 58 + register unsigned long __timeout asm("3") = timeout; 59 + register unsigned long __cmdp asm("4") = virt_to_phys(cmd); 60 + register unsigned long __cmdl asm("5") = len; 61 61 int err; 62 62 63 - __func = func; 64 - __timeout = timeout; 65 - __cmdp = virt_to_phys(cmd); 66 - __cmdl = len; 67 - err = 0; 68 - asm volatile ( 69 - #ifdef CONFIG_64BIT 70 - "diag %2,%4,0x288\n" 71 - "1: \n" 72 - ".section .fixup,\"ax\"\n" 73 - "2: lghi %0,%1\n" 74 - " jg 1b\n" 75 - ".previous\n" 76 - ".section __ex_table,\"a\"\n" 77 - " .align 8\n" 78 - " .quad 1b,2b\n" 79 - ".previous\n" 80 - #else 81 - "diag %2,%4,0x288\n" 82 - "1: \n" 83 - ".section .fixup,\"ax\"\n" 84 - "2: lhi %0,%1\n" 85 - " bras 1,3f\n" 86 - " .long 1b\n" 87 - "3: l 1,0(1)\n" 88 - " br 1\n" 89 - ".previous\n" 90 - ".section __ex_table,\"a\"\n" 91 - " .align 4\n" 92 - " .long 1b,2b\n" 93 - ".previous\n" 94 - #endif 95 - : "+&d"(err) 96 - : "i"(-EINVAL), "d"(__func), "d"(__timeout), 97 - "d"(__cmdp), "d"(__cmdl) 98 - : "1", "cc"); 63 + err = -EINVAL; 64 + asm volatile( 65 + " diag %1,%3,0x288\n" 66 + "0: la %0,0\n" 67 + "1:\n" 68 + EX_TABLE(0b,1b) 69 + : "=d" (err) : "d"(__func), "d"(__timeout), 70 + "d"(__cmdp), "d"(__cmdl), "0" (-EINVAL) : "1", "cc"); 99 71 return err; 100 72 } 101 73
+16 -22
drivers/s390/cio/device_id.c
··· 42 42 spin_lock_irqsave(&diag210_lock, flags); 43 43 diag210_tmp = *addr; 44 44 45 - asm volatile ( 46 - " lhi %0,-1\n" 47 - " sam31\n" 48 - " diag %1,0,0x210\n" 49 - "0: ipm %0\n" 50 - " srl %0,28\n" 51 - "1: sam64\n" 52 - ".section __ex_table,\"a\"\n" 53 - " .align 8\n" 54 - " .quad 0b,1b\n" 55 - ".previous" 56 - : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" ); 45 + asm volatile( 46 + " lhi %0,-1\n" 47 + " sam31\n" 48 + " diag %1,0,0x210\n" 49 + "0: ipm %0\n" 50 + " srl %0,28\n" 51 + "1: sam64\n" 52 + EX_TABLE(0b,1b) 53 + : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory"); 57 54 58 55 *addr = diag210_tmp; 59 56 spin_unlock_irqrestore(&diag210_lock, flags); ··· 63 66 { 64 67 int ccode; 65 68 66 - asm volatile ( 67 - " lhi %0,-1\n" 68 - " diag %1,0,0x210\n" 69 - "0: ipm %0\n" 70 - " srl %0,28\n" 69 + asm volatile( 70 + " lhi %0,-1\n" 71 + " diag %1,0,0x210\n" 72 + "0: ipm %0\n" 73 + " srl %0,28\n" 71 74 "1:\n" 72 - ".section __ex_table,\"a\"\n" 73 - " .align 4\n" 74 - " .long 0b,1b\n" 75 - ".previous" 76 - : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" ); 75 + EX_TABLE(0b,1b) 76 + : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory"); 77 77 78 78 return ccode; 79 79 }
+81 -139
drivers/s390/cio/ioasm.h
··· 25 25 static inline int stsch(struct subchannel_id schid, 26 26 volatile struct schib *addr) 27 27 { 28 + register struct subchannel_id reg1 asm ("1") = schid; 28 29 int ccode; 29 30 30 - __asm__ __volatile__( 31 - " lr 1,%1\n" 32 - " stsch 0(%2)\n" 33 - " ipm %0\n" 34 - " srl %0,28" 35 - : "=d" (ccode) 36 - : "d" (schid), "a" (addr), "m" (*addr) 37 - : "cc", "1" ); 31 + asm volatile( 32 + " stsch 0(%2)\n" 33 + " ipm %0\n" 34 + " srl %0,28" 35 + : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 38 36 return ccode; 39 37 } 40 38 41 39 static inline int stsch_err(struct subchannel_id schid, 42 40 volatile struct schib *addr) 43 41 { 44 - int ccode; 42 + register struct subchannel_id reg1 asm ("1") = schid; 43 + int ccode = -EIO; 45 44 46 - __asm__ __volatile__( 47 - " lhi %0,%3\n" 48 - " lr 1,%1\n" 49 - " stsch 0(%2)\n" 50 - "0: ipm %0\n" 51 - " srl %0,28\n" 45 + asm volatile( 46 + " stsch 0(%2)\n" 47 + "0: ipm %0\n" 48 + " srl %0,28\n" 52 49 "1:\n" 53 - #ifdef CONFIG_64BIT 54 - ".section __ex_table,\"a\"\n" 55 - " .align 8\n" 56 - " .quad 0b,1b\n" 57 - ".previous" 58 - #else 59 - ".section __ex_table,\"a\"\n" 60 - " .align 4\n" 61 - " .long 0b,1b\n" 62 - ".previous" 63 - #endif 64 - : "=&d" (ccode) 65 - : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr) 66 - : "cc", "1" ); 50 + EX_TABLE(0b,1b) 51 + : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 67 52 return ccode; 68 53 } 69 54 70 55 static inline int msch(struct subchannel_id schid, 71 56 volatile struct schib *addr) 72 57 { 58 + register struct subchannel_id reg1 asm ("1") = schid; 73 59 int ccode; 74 60 75 - __asm__ __volatile__( 76 - " lr 1,%1\n" 77 - " msch 0(%2)\n" 78 - " ipm %0\n" 79 - " srl %0,28" 80 - : "=d" (ccode) 81 - : "d" (schid), "a" (addr), "m" (*addr) 82 - : "cc", "1" ); 61 + asm volatile( 62 + " msch 0(%2)\n" 63 + " ipm %0\n" 64 + " srl %0,28" 65 + : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 83 66 return ccode; 84 67 } 85 68 86 69 static inline int msch_err(struct subchannel_id schid, 87 70 volatile struct schib *addr) 88 71 { 89 - int ccode; 72 + register struct subchannel_id reg1 asm ("1") = schid; 73 + int ccode = -EIO; 90 74 91 - __asm__ __volatile__( 92 - " lhi %0,%3\n" 93 - " lr 1,%1\n" 94 - " msch 0(%2)\n" 95 - "0: ipm %0\n" 96 - " srl %0,28\n" 75 + asm volatile( 76 + " msch 0(%2)\n" 77 + "0: ipm %0\n" 78 + " srl %0,28\n" 97 79 "1:\n" 98 - #ifdef CONFIG_64BIT 99 - ".section __ex_table,\"a\"\n" 100 - " .align 8\n" 101 - " .quad 0b,1b\n" 102 - ".previous" 103 - #else 104 - ".section __ex_table,\"a\"\n" 105 - " .align 4\n" 106 - " .long 0b,1b\n" 107 - ".previous" 108 - #endif 109 - : "=&d" (ccode) 110 - : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr) 111 - : "cc", "1" ); 80 + EX_TABLE(0b,1b) 81 + : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 112 82 return ccode; 113 83 } 114 84 115 85 static inline int tsch(struct subchannel_id schid, 116 86 volatile struct irb *addr) 117 87 { 88 + register struct subchannel_id reg1 asm ("1") = schid; 118 89 int ccode; 119 90 120 - __asm__ __volatile__( 121 - " lr 1,%1\n" 122 - " tsch 0(%2)\n" 123 - " ipm %0\n" 124 - " srl %0,28" 125 - : "=d" (ccode) 126 - : "d" (schid), "a" (addr), "m" (*addr) 127 - : "cc", "1" ); 91 + asm volatile( 92 + " tsch 0(%2)\n" 93 + " ipm %0\n" 94 + " srl %0,28" 95 + : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 128 96 return ccode; 129 97 } 130 98 ··· 100 132 { 101 133 int ccode; 102 134 103 - __asm__ __volatile__( 104 - " tpi 0(%1)\n" 105 - " ipm %0\n" 106 - " srl %0,28" 107 
- : "=d" (ccode) 108 - : "a" (addr), "m" (*addr) 109 - : "cc", "1" ); 135 + asm volatile( 136 + " tpi 0(%1)\n" 137 + " ipm %0\n" 138 + " srl %0,28" 139 + : "=d" (ccode) : "a" (addr), "m" (*addr) : "cc"); 110 140 return ccode; 111 141 } 112 142 113 143 static inline int ssch(struct subchannel_id schid, 114 144 volatile struct orb *addr) 115 145 { 146 + register struct subchannel_id reg1 asm ("1") = schid; 116 147 int ccode; 117 148 118 - __asm__ __volatile__( 119 - " lr 1,%1\n" 120 - " ssch 0(%2)\n" 121 - " ipm %0\n" 122 - " srl %0,28" 123 - : "=d" (ccode) 124 - : "d" (schid), "a" (addr), "m" (*addr) 125 - : "cc", "1" ); 149 + asm volatile( 150 + " ssch 0(%2)\n" 151 + " ipm %0\n" 152 + " srl %0,28" 153 + : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 126 154 return ccode; 127 155 } 128 156 129 157 static inline int rsch(struct subchannel_id schid) 130 158 { 159 + register struct subchannel_id reg1 asm ("1") = schid; 131 160 int ccode; 132 161 133 - __asm__ __volatile__( 134 - " lr 1,%1\n" 135 - " rsch\n" 136 - " ipm %0\n" 137 - " srl %0,28" 138 - : "=d" (ccode) 139 - : "d" (schid) 140 - : "cc", "1" ); 162 + asm volatile( 163 + " rsch\n" 164 + " ipm %0\n" 165 + " srl %0,28" 166 + : "=d" (ccode) : "d" (reg1) : "cc"); 141 167 return ccode; 142 168 } 143 169 144 170 static inline int csch(struct subchannel_id schid) 145 171 { 172 + register struct subchannel_id reg1 asm ("1") = schid; 146 173 int ccode; 147 174 148 - __asm__ __volatile__( 149 - " lr 1,%1\n" 150 - " csch\n" 151 - " ipm %0\n" 152 - " srl %0,28" 153 - : "=d" (ccode) 154 - : "d" (schid) 155 - : "cc", "1" ); 175 + asm volatile( 176 + " csch\n" 177 + " ipm %0\n" 178 + " srl %0,28" 179 + : "=d" (ccode) : "d" (reg1) : "cc"); 156 180 return ccode; 157 181 } 158 182 159 183 static inline int hsch(struct subchannel_id schid) 160 184 { 185 + register struct subchannel_id reg1 asm ("1") = schid; 161 186 int ccode; 162 187 163 - __asm__ __volatile__( 164 - " lr 1,%1\n" 165 - " hsch\n" 166 - " ipm %0\n" 167 - " srl %0,28" 168 - : "=d" (ccode) 169 - : "d" (schid) 170 - : "cc", "1" ); 188 + asm volatile( 189 + " hsch\n" 190 + " ipm %0\n" 191 + " srl %0,28" 192 + : "=d" (ccode) : "d" (reg1) : "cc"); 171 193 return ccode; 172 194 } 173 195 174 196 static inline int xsch(struct subchannel_id schid) 175 197 { 198 + register struct subchannel_id reg1 asm ("1") = schid; 176 199 int ccode; 177 200 178 - __asm__ __volatile__( 179 - " lr 1,%1\n" 180 - " .insn rre,0xb2760000,%1,0\n" 181 - " ipm %0\n" 182 - " srl %0,28" 183 - : "=d" (ccode) 184 - : "d" (schid) 185 - : "cc", "1" ); 201 + asm volatile( 202 + " .insn rre,0xb2760000,%1,0\n" 203 + " ipm %0\n" 204 + " srl %0,28" 205 + : "=d" (ccode) : "d" (reg1) : "cc"); 186 206 return ccode; 187 207 } 188 208 ··· 179 223 typedef struct { char _[4096]; } addr_type; 180 224 int cc; 181 225 182 - __asm__ __volatile__ ( 183 - ".insn rre,0xb25f0000,%2,0 \n\t" 184 - "ipm %0 \n\t" 185 - "srl %0,28 \n\t" 226 + asm volatile( 227 + " .insn rre,0xb25f0000,%2,0\n" 228 + " ipm %0\n" 229 + " srl %0,28\n" 186 230 : "=d" (cc), "=m" (*(addr_type *) chsc_area) 187 231 : "d" (chsc_area), "m" (*(addr_type *) chsc_area) 188 - : "cc" ); 189 - 232 + : "cc"); 190 233 return cc; 191 - } 192 - 193 - static inline int iac( void) 194 - { 195 - int ccode; 196 - 197 - __asm__ __volatile__( 198 - " iac 1\n" 199 - " ipm %0\n" 200 - " srl %0,28" 201 - : "=d" (ccode) : : "cc", "1" ); 202 - return ccode; 203 234 } 204 235 205 236 static inline int rchp(int chpid) 206 237 { 238 + register unsigned int reg1 asm ("1") = chpid; 207 
239 int ccode; 208 240 209 - __asm__ __volatile__( 210 - " lr 1,%1\n" 211 - " rchp\n" 212 - " ipm %0\n" 213 - " srl %0,28" 214 - : "=d" (ccode) 215 - : "d" (chpid) 216 - : "cc", "1" ); 241 + asm volatile( 242 + " lr 1,%1\n" 243 + " rchp\n" 244 + " ipm %0\n" 245 + " srl %0,28" 246 + : "=d" (ccode) : "d" (reg1) : "cc"); 217 247 return ccode; 218 248 } 219 249
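
Nearly every helper in ioasm.h ends with the same two instructions, so one note covers them all: ipm inserts the PSW condition code into bits 2-3 of the target register (bits 0-1 are zeroed), and srl %0,28 shifts it down so the C variable ends up holding exactly the condition code 0..3. A standalone illustration (hypothetical function, using ltr merely to set a condition code):

    static inline int cc_of_ltr(int x)
    {
    	int cc;

    	asm volatile(
    		"	ltr	%1,%1\n"	/* cc: 0 if x==0, 1 if x<0, 2 if x>0 */
    		"	ipm	%0\n"
    		"	srl	%0,28"
    		: "=d" (cc) : "d" (x) : "cc");
    	return cc;
    }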
+47 -141
drivers/s390/cio/qdio.h
··· 274 274 register unsigned long _sch asm ("1") = sch; 275 275 unsigned long _queuestart = ((unsigned long)queue << 32) | *start; 276 276 277 - asm volatile ( 278 - " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t" 279 - : "+d" (_ccq), "+d" (_queuestart) 280 - : "d" ((unsigned long)state), "d" (_sch) 281 - : "memory", "cc" 282 - ); 277 + asm volatile( 278 + " .insn rsy,0xeb000000008A,%1,0,0(%2)" 279 + : "+d" (_ccq), "+d" (_queuestart) 280 + : "d" ((unsigned long)state), "d" (_sch) 281 + : "memory", "cc"); 283 282 *count = _ccq & 0xff; 284 283 *start = _queuestart & 0xff; 285 284 ··· 298 299 unsigned long _queuestart = ((unsigned long)queue << 32) | *start; 299 300 unsigned long _state = 0; 300 301 301 - asm volatile ( 302 - " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t" 303 - : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) 304 - : "d" (_sch) 305 - : "memory", "cc" 306 - ); 302 + asm volatile( 303 + " .insn rrf,0xB99c0000,%1,%2,0,0" 304 + : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) 305 + : "d" (_sch) 306 + : "memory", "cc" ); 307 307 *count = _ccq & 0xff; 308 308 *start = _queuestart & 0xff; 309 309 *state = _state & 0xff; ··· 317 319 static inline int 318 320 do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) 319 321 { 322 + register unsigned long reg0 asm ("0") = 2; 323 + register struct subchannel_id reg1 asm ("1") = schid; 324 + register unsigned long reg2 asm ("2") = mask1; 325 + register unsigned long reg3 asm ("3") = mask2; 320 326 int cc; 321 327 322 - #ifndef CONFIG_64BIT 323 - asm volatile ( 324 - "lhi 0,2 \n\t" 325 - "lr 1,%1 \n\t" 326 - "lr 2,%2 \n\t" 327 - "lr 3,%3 \n\t" 328 - "siga 0 \n\t" 329 - "ipm %0 \n\t" 330 - "srl %0,28 \n\t" 328 + asm volatile( 329 + " siga 0\n" 330 + " ipm %0\n" 331 + " srl %0,28\n" 331 332 : "=d" (cc) 332 - : "d" (schid), "d" (mask1), "d" (mask2) 333 - : "cc", "0", "1", "2", "3" 334 - ); 335 - #else /* CONFIG_64BIT */ 336 - asm volatile ( 337 - "lghi 0,2 \n\t" 338 - "llgfr 1,%1 \n\t" 339 - "llgfr 2,%2 \n\t" 340 - "llgfr 3,%3 \n\t" 341 - "siga 0 \n\t" 342 - "ipm %0 \n\t" 343 - "srl %0,28 \n\t" 344 - : "=d" (cc) 345 - : "d" (schid), "d" (mask1), "d" (mask2) 346 - : "cc", "0", "1", "2", "3" 347 - ); 348 - #endif /* CONFIG_64BIT */ 333 + : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc"); 349 334 return cc; 350 335 } 351 336 352 337 static inline int 353 338 do_siga_input(struct subchannel_id schid, unsigned int mask) 354 339 { 340 + register unsigned long reg0 asm ("0") = 1; 341 + register struct subchannel_id reg1 asm ("1") = schid; 342 + register unsigned long reg2 asm ("2") = mask; 355 343 int cc; 356 344 357 - #ifndef CONFIG_64BIT 358 - asm volatile ( 359 - "lhi 0,1 \n\t" 360 - "lr 1,%1 \n\t" 361 - "lr 2,%2 \n\t" 362 - "siga 0 \n\t" 363 - "ipm %0 \n\t" 364 - "srl %0,28 \n\t" 345 + asm volatile( 346 + " siga 0\n" 347 + " ipm %0\n" 348 + " srl %0,28\n" 365 349 : "=d" (cc) 366 - : "d" (schid), "d" (mask) 367 - : "cc", "0", "1", "2", "memory" 368 - ); 369 - #else /* CONFIG_64BIT */ 370 - asm volatile ( 371 - "lghi 0,1 \n\t" 372 - "llgfr 1,%1 \n\t" 373 - "llgfr 2,%2 \n\t" 374 - "siga 0 \n\t" 375 - "ipm %0 \n\t" 376 - "srl %0,28 \n\t" 377 - : "=d" (cc) 378 - : "d" (schid), "d" (mask) 379 - : "cc", "0", "1", "2", "memory" 380 - ); 381 - #endif /* CONFIG_64BIT */ 382 - 350 + : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory"); 383 351 return cc; 384 352 } 385 353 ··· 353 389 do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, 354 390 unsigned int fc) 355 391 { 392 + register unsigned long __fc asm("0") = fc; 393 
+ register unsigned long __schid asm("1") = schid; 394 + register unsigned long __mask asm("2") = mask; 356 395 int cc; 357 - __u32 busy_bit; 358 396 359 - #ifndef CONFIG_64BIT 360 - asm volatile ( 361 - "lhi 0,0 \n\t" 362 - "lr 1,%2 \n\t" 363 - "lr 2,%3 \n\t" 364 - "siga 0 \n\t" 365 - "0:" 366 - "ipm %0 \n\t" 367 - "srl %0,28 \n\t" 368 - "srl 0,31 \n\t" 369 - "lr %1,0 \n\t" 370 - "1: \n\t" 371 - ".section .fixup,\"ax\"\n\t" 372 - "2: \n\t" 373 - "lhi %0,%4 \n\t" 374 - "bras 1,3f \n\t" 375 - ".long 1b \n\t" 376 - "3: \n\t" 377 - "l 1,0(1) \n\t" 378 - "br 1 \n\t" 379 - ".previous \n\t" 380 - ".section __ex_table,\"a\"\n\t" 381 - ".align 4 \n\t" 382 - ".long 0b,2b \n\t" 383 - ".previous \n\t" 384 - : "=d" (cc), "=d" (busy_bit) 385 - : "d" (schid), "d" (mask), 386 - "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) 387 - : "cc", "0", "1", "2", "memory" 388 - ); 389 - #else /* CONFIG_64BIT */ 390 - asm volatile ( 391 - "llgfr 0,%5 \n\t" 392 - "lgr 1,%2 \n\t" 393 - "llgfr 2,%3 \n\t" 394 - "siga 0 \n\t" 395 - "0:" 396 - "ipm %0 \n\t" 397 - "srl %0,28 \n\t" 398 - "srl 0,31 \n\t" 399 - "llgfr %1,0 \n\t" 400 - "1: \n\t" 401 - ".section .fixup,\"ax\"\n\t" 402 - "lghi %0,%4 \n\t" 403 - "jg 1b \n\t" 404 - ".previous\n\t" 405 - ".section __ex_table,\"a\"\n\t" 406 - ".align 8 \n\t" 407 - ".quad 0b,1b \n\t" 408 - ".previous \n\t" 409 - : "=d" (cc), "=d" (busy_bit) 410 - : "d" (schid), "d" (mask), 411 - "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc) 412 - : "cc", "0", "1", "2", "memory" 413 - ); 414 - #endif /* CONFIG_64BIT */ 415 - 416 - (*bb) = busy_bit; 397 + asm volatile( 398 + " siga 0\n" 399 + "0: ipm %0\n" 400 + " srl %0,28\n" 401 + "1:\n" 402 + EX_TABLE(0b,1b) 403 + : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) 404 + : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) 405 + : "cc", "memory"); 406 + (*bb) = ((unsigned int) __fc) >> 31; 417 407 return cc; 418 408 } 419 409 420 410 static inline unsigned long 421 411 do_clear_global_summary(void) 422 412 { 413 + register unsigned long __fn asm("1") = 3; 414 + register unsigned long __tmp asm("2"); 415 + register unsigned long __time asm("3"); 423 416 424 - unsigned long time; 425 - 426 - #ifndef CONFIG_64BIT 427 - asm volatile ( 428 - "lhi 1,3 \n\t" 429 - ".insn rre,0xb2650000,2,0 \n\t" 430 - "lr %0,3 \n\t" 431 - : "=d" (time) : : "cc", "1", "2", "3" 432 - ); 433 - #else /* CONFIG_64BIT */ 434 - asm volatile ( 435 - "lghi 1,3 \n\t" 436 - ".insn rre,0xb2650000,2,0 \n\t" 437 - "lgr %0,3 \n\t" 438 - : "=d" (time) : : "cc", "1", "2", "3" 439 - ); 440 - #endif /* CONFIG_64BIT */ 441 - 442 - return time; 417 + asm volatile( 418 + " .insn rre,0xb2650000,2,0" 419 + : "+d" (__fn), "=d" (__tmp), "=d" (__time)); 420 + return __time; 443 421 } 444 422 445 423 /*
+17 -22
drivers/s390/net/iucv.c
··· 534 534 * 535 535 * Returns: return code from CP's IUCV call 536 536 */ 537 - static __inline__ ulong 538 - b2f0(__u32 code, void *parm) 537 + static inline ulong b2f0(__u32 code, void *parm) 539 538 { 539 + register unsigned long reg0 asm ("0"); 540 + register unsigned long reg1 asm ("1"); 540 541 iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param)); 541 542 542 - asm volatile ( 543 - "LRA 1,0(%1)\n\t" 544 - "LR 0,%0\n\t" 545 - ".long 0xb2f01000" 546 - : 547 - : "d" (code), "a" (parm) 548 - : "0", "1" 549 - ); 543 + reg0 = code; 544 + reg1 = virt_to_phys(parm); 545 + asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1)); 550 546 551 547 iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param)); 552 548 ··· 1244 1248 static int 1245 1249 iucv_query_generic(int want_maxconn) 1246 1250 { 1251 + register unsigned long reg0 asm ("0"); 1252 + register unsigned long reg1 asm ("1"); 1247 1253 iparml_purge *parm = (iparml_purge *)grab_param(); 1248 1254 int bufsize, maxconn; 1249 1255 int ccode; ··· 1254 1256 * Call b2f0 and store R0 (max buffer size), 1255 1257 * R1 (max connections) and CC. 1256 1258 */ 1257 - asm volatile ( 1258 - "LRA 1,0(%4)\n\t" 1259 - "LR 0,%3\n\t" 1260 - ".long 0xb2f01000\n\t" 1261 - "IPM %0\n\t" 1262 - "SRL %0,28\n\t" 1263 - "ST 0,%1\n\t" 1264 - "ST 1,%2\n\t" 1265 - : "=d" (ccode), "=m" (bufsize), "=m" (maxconn) 1266 - : "d" (QUERY), "a" (parm) 1267 - : "0", "1", "cc" 1268 - ); 1259 + reg0 = QUERY; 1260 + reg1 = virt_to_phys(parm); 1261 + asm volatile( 1262 + " .long 0xb2f01000\n" 1263 + " ipm %0\n" 1264 + " srl %0,28\n" 1265 + : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); 1266 + bufsize = reg0; 1267 + maxconn = reg1; 1269 1268 release_param(parm); 1270 1269 1271 1270 if (ccode)
+48 -43
drivers/s390/s390mach.c
··· 253 253 kill_task = 1; 254 254 255 255 #ifndef CONFIG_64BIT 256 - asm volatile("ld 0,0(%0)\n" 257 - "ld 2,8(%0)\n" 258 - "ld 4,16(%0)\n" 259 - "ld 6,24(%0)" 260 - : : "a" (&S390_lowcore.floating_pt_save_area)); 256 + asm volatile( 257 + " ld 0,0(%0)\n" 258 + " ld 2,8(%0)\n" 259 + " ld 4,16(%0)\n" 260 + " ld 6,24(%0)" 261 + : : "a" (&S390_lowcore.floating_pt_save_area)); 261 262 #endif 262 263 263 264 if (MACHINE_HAS_IEEE) { ··· 275 274 * Floating point control register can't be restored. 276 275 * Task will be terminated. 277 276 */ 278 - asm volatile ("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); 277 + asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); 279 278 kill_task = 1; 280 279 281 - } 282 - else 283 - asm volatile ( 284 - "lfpc 0(%0)" 285 - : : "a" (fpt_creg_save_area)); 280 + } else 281 + asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 286 282 287 - asm volatile("ld 0,0(%0)\n" 288 - "ld 1,8(%0)\n" 289 - "ld 2,16(%0)\n" 290 - "ld 3,24(%0)\n" 291 - "ld 4,32(%0)\n" 292 - "ld 5,40(%0)\n" 293 - "ld 6,48(%0)\n" 294 - "ld 7,56(%0)\n" 295 - "ld 8,64(%0)\n" 296 - "ld 9,72(%0)\n" 297 - "ld 10,80(%0)\n" 298 - "ld 11,88(%0)\n" 299 - "ld 12,96(%0)\n" 300 - "ld 13,104(%0)\n" 301 - "ld 14,112(%0)\n" 302 - "ld 15,120(%0)\n" 303 - : : "a" (fpt_save_area)); 283 + asm volatile( 284 + " ld 0,0(%0)\n" 285 + " ld 1,8(%0)\n" 286 + " ld 2,16(%0)\n" 287 + " ld 3,24(%0)\n" 288 + " ld 4,32(%0)\n" 289 + " ld 5,40(%0)\n" 290 + " ld 6,48(%0)\n" 291 + " ld 7,56(%0)\n" 292 + " ld 8,64(%0)\n" 293 + " ld 9,72(%0)\n" 294 + " ld 10,80(%0)\n" 295 + " ld 11,88(%0)\n" 296 + " ld 12,96(%0)\n" 297 + " ld 13,104(%0)\n" 298 + " ld 14,112(%0)\n" 299 + " ld 15,120(%0)\n" 300 + : : "a" (fpt_save_area)); 304 301 } 305 302 306 303 /* Revalidate access registers */ 307 - asm volatile("lam 0,15,0(%0)" 308 - : : "a" (&S390_lowcore.access_regs_save_area)); 304 + asm volatile( 305 + " lam 0,15,0(%0)" 306 + : : "a" (&S390_lowcore.access_regs_save_area)); 309 307 if (!mci->ar) 310 308 /* 311 309 * Access registers have unknown contents. ··· 321 321 s390_handle_damage("invalid control registers."); 322 322 else 323 323 #ifdef CONFIG_64BIT 324 - asm volatile("lctlg 0,15,0(%0)" 325 - : : "a" (&S390_lowcore.cregs_save_area)); 324 + asm volatile( 325 + " lctlg 0,15,0(%0)" 326 + : : "a" (&S390_lowcore.cregs_save_area)); 326 327 #else 327 - asm volatile("lctl 0,15,0(%0)" 328 - : : "a" (&S390_lowcore.cregs_save_area)); 328 + asm volatile( 329 + " lctl 0,15,0(%0)" 330 + : : "a" (&S390_lowcore.cregs_save_area)); 329 331 #endif 330 332 331 333 /* ··· 341 339 * old contents (should be zero) otherwise set it to zero. 342 340 */ 343 341 if (!mci->pr) 344 - asm volatile("sr 0,0\n" 345 - "sckpf" 346 - : : : "0", "cc"); 342 + asm volatile( 343 + " sr 0,0\n" 344 + " sckpf" 345 + : : : "0", "cc"); 347 346 else 348 347 asm volatile( 349 - "l 0,0(%0)\n" 350 - "sckpf" 351 - : : "a" (&S390_lowcore.tod_progreg_save_area) : "0", "cc"); 348 + " l 0,0(%0)\n" 349 + " sckpf" 350 + : : "a" (&S390_lowcore.tod_progreg_save_area) 351 + : "0", "cc"); 352 352 #endif 353 353 354 354 /* Revalidate clock comparator register */ 355 - asm volatile ("stck 0(%1)\n" 356 - "sckc 0(%1)" 357 - : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); 355 + asm volatile( 356 + " stck 0(%1)\n" 357 + " sckc 0(%1)" 358 + : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); 358 359 359 360 /* Check if old PSW is valid */ 360 361 if (!mci->wp)
+1 -1
include/asm-s390/appldata.h
··· 80 80 parm_list.product_id_addr = (unsigned long) id; 81 81 parm_list.buffer_addr = virt_to_phys(buffer); 82 82 asm volatile( 83 - "diag %1,%0,0xdc" 83 + " diag %1,%0,0xdc" 84 84 : "=d" (ry) 85 85 : "d" (&parm_list), "m" (parm_list), "m" (*id) 86 86 : "cc");
+92 -28
include/asm-s390/atomic.h
··· 30 30 31 31 #ifdef __KERNEL__ 32 32 33 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 34 + 33 35 #define __CS_LOOP(ptr, op_val, op_string) ({ \ 34 36 typeof(ptr->counter) old_val, new_val; \ 35 - __asm__ __volatile__(" l %0,0(%3)\n" \ 36 - "0: lr %1,%0\n" \ 37 - op_string " %1,%4\n" \ 38 - " cs %0,%1,0(%3)\n" \ 39 - " jl 0b" \ 40 - : "=&d" (old_val), "=&d" (new_val), \ 41 - "=m" (((atomic_t *)(ptr))->counter) \ 42 - : "a" (ptr), "d" (op_val), \ 43 - "m" (((atomic_t *)(ptr))->counter) \ 44 - : "cc", "memory" ); \ 37 + asm volatile( \ 38 + " l %0,%2\n" \ 39 + "0: lr %1,%0\n" \ 40 + op_string " %1,%3\n" \ 41 + " cs %0,%1,%2\n" \ 42 + " jl 0b" \ 43 + : "=&d" (old_val), "=&d" (new_val), \ 44 + "=Q" (((atomic_t *)(ptr))->counter) \ 45 + : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ 46 + : "cc", "memory"); \ 45 47 new_val; \ 46 48 }) 49 + 50 + #else /* __GNUC__ */ 51 + 52 + #define __CS_LOOP(ptr, op_val, op_string) ({ \ 53 + typeof(ptr->counter) old_val, new_val; \ 54 + asm volatile( \ 55 + " l %0,0(%3)\n" \ 56 + "0: lr %1,%0\n" \ 57 + op_string " %1,%4\n" \ 58 + " cs %0,%1,0(%3)\n" \ 59 + " jl 0b" \ 60 + : "=&d" (old_val), "=&d" (new_val), \ 61 + "=m" (((atomic_t *)(ptr))->counter) \ 62 + : "a" (ptr), "d" (op_val), \ 63 + "m" (((atomic_t *)(ptr))->counter) \ 64 + : "cc", "memory"); \ 65 + new_val; \ 66 + }) 67 + 68 + #endif /* __GNUC__ */ 69 + 47 70 #define atomic_read(v) ((v)->counter) 48 71 #define atomic_set(v,i) (((v)->counter) = (i)) 49 72 ··· 104 81 105 82 static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) 106 83 { 107 - __asm__ __volatile__(" cs %0,%3,0(%2)\n" 108 - : "+d" (old), "=m" (v->counter) 109 - : "a" (v), "d" (new), "m" (v->counter) 110 - : "cc", "memory" ); 84 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 85 + asm volatile( 86 + " cs %0,%2,%1" 87 + : "+d" (old), "=Q" (v->counter) 88 + : "d" (new), "Q" (v->counter) 89 + : "cc", "memory"); 90 + #else /* __GNUC__ */ 91 + asm volatile( 92 + " cs %0,%3,0(%2)" 93 + : "+d" (old), "=m" (v->counter) 94 + : "a" (v), "d" (new), "m" (v->counter) 95 + : "cc", "memory"); 96 + #endif /* __GNUC__ */ 111 97 return old; 112 98 } 113 99 ··· 145 113 } __attribute__ ((aligned (8))) atomic64_t; 146 114 #define ATOMIC64_INIT(i) { (i) } 147 115 116 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 117 + 148 118 #define __CSG_LOOP(ptr, op_val, op_string) ({ \ 149 119 typeof(ptr->counter) old_val, new_val; \ 150 - __asm__ __volatile__(" lg %0,0(%3)\n" \ 151 - "0: lgr %1,%0\n" \ 152 - op_string " %1,%4\n" \ 153 - " csg %0,%1,0(%3)\n" \ 154 - " jl 0b" \ 155 - : "=&d" (old_val), "=&d" (new_val), \ 156 - "=m" (((atomic_t *)(ptr))->counter) \ 157 - : "a" (ptr), "d" (op_val), \ 158 - "m" (((atomic_t *)(ptr))->counter) \ 159 - : "cc", "memory" ); \ 120 + asm volatile( \ 121 + " lg %0,%2\n" \ 122 + "0: lgr %1,%0\n" \ 123 + op_string " %1,%3\n" \ 124 + " csg %0,%1,%2\n" \ 125 + " jl 0b" \ 126 + : "=&d" (old_val), "=&d" (new_val), \ 127 + "=Q" (((atomic_t *)(ptr))->counter) \ 128 + : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ 129 + : "cc", "memory" ); \ 160 130 new_val; \ 161 131 }) 132 + 133 + #else /* __GNUC__ */ 134 + 135 + #define __CSG_LOOP(ptr, op_val, op_string) ({ \ 136 + typeof(ptr->counter) old_val, new_val; \ 137 + asm volatile( \ 138 + " lg %0,0(%3)\n" \ 139 + "0: lgr %1,%0\n" \ 140 + op_string " %1,%4\n" \ 141 + " csg %0,%1,0(%3)\n" \ 142 + " jl 0b" \ 143 + : "=&d" (old_val), "=&d" (new_val), \ 144 + "=m" (((atomic_t *)(ptr))->counter) \ 145 + : "a" (ptr), "d" (op_val), \ 146 + 
"m" (((atomic_t *)(ptr))->counter) \ 147 + : "cc", "memory" ); \ 148 + new_val; \ 149 + }) 150 + 151 + #endif /* __GNUC__ */ 152 + 162 153 #define atomic64_read(v) ((v)->counter) 163 154 #define atomic64_set(v,i) (((v)->counter) = (i)) 164 155 ··· 218 163 static __inline__ long long atomic64_cmpxchg(atomic64_t *v, 219 164 long long old, long long new) 220 165 { 221 - __asm__ __volatile__(" csg %0,%3,0(%2)\n" 222 - : "+d" (old), "=m" (v->counter) 223 - : "a" (v), "d" (new), "m" (v->counter) 224 - : "cc", "memory" ); 166 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 167 + asm volatile( 168 + " csg %0,%2,%1" 169 + : "+d" (old), "=Q" (v->counter) 170 + : "d" (new), "Q" (v->counter) 171 + : "cc", "memory"); 172 + #else /* __GNUC__ */ 173 + asm volatile( 174 + " csg %0,%3,0(%2)" 175 + : "+d" (old), "=m" (v->counter) 176 + : "a" (v), "d" (new), "m" (v->counter) 177 + : "cc", "memory"); 178 + #endif /* __GNUC__ */ 225 179 return old; 226 180 } 227 181
+289 -337
include/asm-s390/bitops.h
··· 67 67 #define __BITOPS_AND "nr" 68 68 #define __BITOPS_XOR "xr" 69 69 70 - #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 71 - __asm__ __volatile__(" l %0,0(%4)\n" \ 72 - "0: lr %1,%0\n" \ 73 - __op_string " %1,%3\n" \ 74 - " cs %0,%1,0(%4)\n" \ 75 - " jl 0b" \ 76 - : "=&d" (__old), "=&d" (__new), \ 77 - "=m" (*(unsigned long *) __addr) \ 78 - : "d" (__val), "a" (__addr), \ 79 - "m" (*(unsigned long *) __addr) : "cc" ); 70 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 71 + 72 + #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 73 + asm volatile( \ 74 + " l %0,%2\n" \ 75 + "0: lr %1,%0\n" \ 76 + __op_string " %1,%3\n" \ 77 + " cs %0,%1,%2\n" \ 78 + " jl 0b" \ 79 + : "=&d" (__old), "=&d" (__new), \ 80 + "=Q" (*(unsigned long *) __addr) \ 81 + : "d" (__val), "Q" (*(unsigned long *) __addr) \ 82 + : "cc"); 83 + 84 + #else /* __GNUC__ */ 85 + 86 + #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 87 + asm volatile( \ 88 + " l %0,0(%4)\n" \ 89 + "0: lr %1,%0\n" \ 90 + __op_string " %1,%3\n" \ 91 + " cs %0,%1,0(%4)\n" \ 92 + " jl 0b" \ 93 + : "=&d" (__old), "=&d" (__new), \ 94 + "=m" (*(unsigned long *) __addr) \ 95 + : "d" (__val), "a" (__addr), \ 96 + "m" (*(unsigned long *) __addr) : "cc"); 97 + 98 + #endif /* __GNUC__ */ 80 99 81 100 #else /* __s390x__ */ 82 101 ··· 105 86 #define __BITOPS_AND "ngr" 106 87 #define __BITOPS_XOR "xgr" 107 88 108 - #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 109 - __asm__ __volatile__(" lg %0,0(%4)\n" \ 110 - "0: lgr %1,%0\n" \ 111 - __op_string " %1,%3\n" \ 112 - " csg %0,%1,0(%4)\n" \ 113 - " jl 0b" \ 114 - : "=&d" (__old), "=&d" (__new), \ 115 - "=m" (*(unsigned long *) __addr) \ 116 - : "d" (__val), "a" (__addr), \ 117 - "m" (*(unsigned long *) __addr) : "cc" ); 89 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 90 + 91 + #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 92 + asm volatile( \ 93 + " lg %0,%2\n" \ 94 + "0: lgr %1,%0\n" \ 95 + __op_string " %1,%3\n" \ 96 + " csg %0,%1,%2\n" \ 97 + " jl 0b" \ 98 + : "=&d" (__old), "=&d" (__new), \ 99 + "=Q" (*(unsigned long *) __addr) \ 100 + : "d" (__val), "Q" (*(unsigned long *) __addr) \ 101 + : "cc"); 102 + 103 + #else /* __GNUC__ */ 104 + 105 + #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 106 + asm volatile( \ 107 + " lg %0,0(%4)\n" \ 108 + "0: lgr %1,%0\n" \ 109 + __op_string " %1,%3\n" \ 110 + " csg %0,%1,0(%4)\n" \ 111 + " jl 0b" \ 112 + : "=&d" (__old), "=&d" (__new), \ 113 + "=m" (*(unsigned long *) __addr) \ 114 + : "d" (__val), "a" (__addr), \ 115 + "m" (*(unsigned long *) __addr) : "cc"); 116 + 117 + 118 + #endif /* __GNUC__ */ 118 119 119 120 #endif /* __s390x__ */ 120 121 121 122 #define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 122 - #define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" ) 123 + #define __BITOPS_BARRIER() asm volatile("" : : : "memory") 123 124 124 125 #ifdef CONFIG_SMP 125 126 /* ··· 256 217 unsigned long addr; 257 218 258 219 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 259 - asm volatile("oc 0(1,%1),0(%2)" 260 - : "=m" (*(char *) addr) 261 - : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 262 - "m" (*(char *) addr) : "cc" ); 220 + asm volatile( 221 + " oc 0(1,%1),0(%2)" 222 + : "=m" (*(char *) addr) : "a" (addr), 223 + "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); 263 224 } 264 225 265 226 static inline void ··· 268 229 unsigned long addr; 269 230 270 231 addr = 
((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 271 - switch (nr&7) { 272 - case 0: 273 - asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr) 274 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 275 - break; 276 - case 1: 277 - asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr) 278 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 279 - break; 280 - case 2: 281 - asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr) 282 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 283 - break; 284 - case 3: 285 - asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr) 286 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 287 - break; 288 - case 4: 289 - asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr) 290 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 291 - break; 292 - case 5: 293 - asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr) 294 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 295 - break; 296 - case 6: 297 - asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr) 298 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 299 - break; 300 - case 7: 301 - asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr) 302 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 303 - break; 304 - } 232 + *(unsigned char *) addr |= 1 << (nr & 7); 305 233 } 306 234 307 235 #define set_bit_simple(nr,addr) \ ··· 285 279 unsigned long addr; 286 280 287 281 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 288 - asm volatile("nc 0(1,%1),0(%2)" 289 - : "=m" (*(char *) addr) 290 - : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 291 - "m" (*(char *) addr) : "cc" ); 282 + asm volatile( 283 + " nc 0(1,%1),0(%2)" 284 + : "=m" (*(char *) addr) : "a" (addr), 285 + "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc"); 292 286 } 293 287 294 288 static inline void ··· 297 291 unsigned long addr; 298 292 299 293 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 300 - switch (nr&7) { 301 - case 0: 302 - asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr) 303 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 304 - break; 305 - case 1: 306 - asm volatile ("ni 0(%1),0xFD": "=m" (*(char *) addr) 307 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 308 - break; 309 - case 2: 310 - asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr) 311 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 312 - break; 313 - case 3: 314 - asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr) 315 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 316 - break; 317 - case 4: 318 - asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr) 319 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 320 - break; 321 - case 5: 322 - asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr) 323 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 324 - break; 325 - case 6: 326 - asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr) 327 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 328 - break; 329 - case 7: 330 - asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr) 331 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 332 - break; 333 - } 294 + *(unsigned char *) addr &= ~(1 << (nr & 7)); 334 295 } 335 296 336 297 #define clear_bit_simple(nr,addr) \ ··· 313 340 unsigned long addr; 314 341 315 342 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 316 - asm volatile("xc 0(1,%1),0(%2)" 317 - : "=m" (*(char *) addr) 318 - : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 319 - "m" (*(char *) addr) : "cc" ); 343 + asm volatile( 344 + " xc 0(1,%1),0(%2)" 345 + : "=m" (*(char *) addr) : "a" (addr), 346 + "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) 
: "cc" ); 320 347 } 321 348 322 349 static inline void ··· 325 352 unsigned long addr; 326 353 327 354 addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 328 - switch (nr&7) { 329 - case 0: 330 - asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr) 331 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 332 - break; 333 - case 1: 334 - asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr) 335 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 336 - break; 337 - case 2: 338 - asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr) 339 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 340 - break; 341 - case 3: 342 - asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr) 343 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 344 - break; 345 - case 4: 346 - asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr) 347 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 348 - break; 349 - case 5: 350 - asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr) 351 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 352 - break; 353 - case 6: 354 - asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr) 355 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 356 - break; 357 - case 7: 358 - asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr) 359 - : "a" (addr), "m" (*(char *) addr) : "cc" ); 360 - break; 361 - } 355 + *(unsigned char *) addr ^= 1 << (nr & 7); 362 356 } 363 357 364 358 #define change_bit_simple(nr,addr) \ ··· 344 404 345 405 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 346 406 ch = *(unsigned char *) addr; 347 - asm volatile("oc 0(1,%1),0(%2)" 348 - : "=m" (*(char *) addr) 349 - : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 350 - "m" (*(char *) addr) : "cc", "memory" ); 407 + asm volatile( 408 + " oc 0(1,%1),0(%2)" 409 + : "=m" (*(char *) addr) 410 + : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 411 + "m" (*(char *) addr) : "cc", "memory"); 351 412 return (ch >> (nr & 7)) & 1; 352 413 } 353 414 #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) ··· 364 423 365 424 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 366 425 ch = *(unsigned char *) addr; 367 - asm volatile("nc 0(1,%1),0(%2)" 368 - : "=m" (*(char *) addr) 369 - : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 370 - "m" (*(char *) addr) : "cc", "memory" ); 426 + asm volatile( 427 + " nc 0(1,%1),0(%2)" 428 + : "=m" (*(char *) addr) 429 + : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 430 + "m" (*(char *) addr) : "cc", "memory"); 371 431 return (ch >> (nr & 7)) & 1; 372 432 } 373 433 #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) ··· 384 442 385 443 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 386 444 ch = *(unsigned char *) addr; 387 - asm volatile("xc 0(1,%1),0(%2)" 388 - : "=m" (*(char *) addr) 389 - : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 390 - "m" (*(char *) addr) : "cc", "memory" ); 445 + asm volatile( 446 + " xc 0(1,%1),0(%2)" 447 + : "=m" (*(char *) addr) 448 + : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 449 + "m" (*(char *) addr) : "cc", "memory"); 391 450 return (ch >> (nr & 7)) & 1; 392 451 } 393 452 #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) ··· 500 557 501 558 if (!size) 502 559 return 0; 503 - __asm__(" lhi %1,-1\n" 504 - " lr %2,%3\n" 505 - " slr %0,%0\n" 506 - " ahi %2,31\n" 507 - " srl %2,5\n" 508 - "0: c %1,0(%0,%4)\n" 509 - " jne 1f\n" 510 - " la %0,4(%0)\n" 511 - " brct %2,0b\n" 512 - " lr %0,%3\n" 513 - " j 4f\n" 514 - "1: l %2,0(%0,%4)\n" 515 - " sll %0,3\n" 516 - " lhi %1,0xff\n" 517 - " tml %2,0xffff\n" 518 - " jno 
2f\n" 519 - " ahi %0,16\n" 520 - " srl %2,16\n" 521 - "2: tml %2,0x00ff\n" 522 - " jno 3f\n" 523 - " ahi %0,8\n" 524 - " srl %2,8\n" 525 - "3: nr %2,%1\n" 526 - " ic %2,0(%2,%5)\n" 527 - " alr %0,%2\n" 528 - "4:" 529 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 530 - : "a" (size), "a" (addr), "a" (&_zb_findmap), 531 - "m" (*(addrtype *) addr) : "cc" ); 560 + asm volatile( 561 + " lhi %1,-1\n" 562 + " lr %2,%3\n" 563 + " slr %0,%0\n" 564 + " ahi %2,31\n" 565 + " srl %2,5\n" 566 + "0: c %1,0(%0,%4)\n" 567 + " jne 1f\n" 568 + " la %0,4(%0)\n" 569 + " brct %2,0b\n" 570 + " lr %0,%3\n" 571 + " j 4f\n" 572 + "1: l %2,0(%0,%4)\n" 573 + " sll %0,3\n" 574 + " lhi %1,0xff\n" 575 + " tml %2,0xffff\n" 576 + " jno 2f\n" 577 + " ahi %0,16\n" 578 + " srl %2,16\n" 579 + "2: tml %2,0x00ff\n" 580 + " jno 3f\n" 581 + " ahi %0,8\n" 582 + " srl %2,8\n" 583 + "3: nr %2,%1\n" 584 + " ic %2,0(%2,%5)\n" 585 + " alr %0,%2\n" 586 + "4:" 587 + : "=&a" (res), "=&d" (cmp), "=&a" (count) 588 + : "a" (size), "a" (addr), "a" (&_zb_findmap), 589 + "m" (*(addrtype *) addr) : "cc"); 532 590 return (res < size) ? res : size; 533 591 } 534 592 ··· 542 598 543 599 if (!size) 544 600 return 0; 545 - __asm__(" slr %1,%1\n" 546 - " lr %2,%3\n" 547 - " slr %0,%0\n" 548 - " ahi %2,31\n" 549 - " srl %2,5\n" 550 - "0: c %1,0(%0,%4)\n" 551 - " jne 1f\n" 552 - " la %0,4(%0)\n" 553 - " brct %2,0b\n" 554 - " lr %0,%3\n" 555 - " j 4f\n" 556 - "1: l %2,0(%0,%4)\n" 557 - " sll %0,3\n" 558 - " lhi %1,0xff\n" 559 - " tml %2,0xffff\n" 560 - " jnz 2f\n" 561 - " ahi %0,16\n" 562 - " srl %2,16\n" 563 - "2: tml %2,0x00ff\n" 564 - " jnz 3f\n" 565 - " ahi %0,8\n" 566 - " srl %2,8\n" 567 - "3: nr %2,%1\n" 568 - " ic %2,0(%2,%5)\n" 569 - " alr %0,%2\n" 570 - "4:" 571 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 572 - : "a" (size), "a" (addr), "a" (&_sb_findmap), 573 - "m" (*(addrtype *) addr) : "cc" ); 601 + asm volatile( 602 + " slr %1,%1\n" 603 + " lr %2,%3\n" 604 + " slr %0,%0\n" 605 + " ahi %2,31\n" 606 + " srl %2,5\n" 607 + "0: c %1,0(%0,%4)\n" 608 + " jne 1f\n" 609 + " la %0,4(%0)\n" 610 + " brct %2,0b\n" 611 + " lr %0,%3\n" 612 + " j 4f\n" 613 + "1: l %2,0(%0,%4)\n" 614 + " sll %0,3\n" 615 + " lhi %1,0xff\n" 616 + " tml %2,0xffff\n" 617 + " jnz 2f\n" 618 + " ahi %0,16\n" 619 + " srl %2,16\n" 620 + "2: tml %2,0x00ff\n" 621 + " jnz 3f\n" 622 + " ahi %0,8\n" 623 + " srl %2,8\n" 624 + "3: nr %2,%1\n" 625 + " ic %2,0(%2,%5)\n" 626 + " alr %0,%2\n" 627 + "4:" 628 + : "=&a" (res), "=&d" (cmp), "=&a" (count) 629 + : "a" (size), "a" (addr), "a" (&_sb_findmap), 630 + "m" (*(addrtype *) addr) : "cc"); 574 631 return (res < size) ? 
res : size; 575 632 } 576 633 ··· 585 640 586 641 if (!size) 587 642 return 0; 588 - __asm__(" lghi %1,-1\n" 589 - " lgr %2,%3\n" 590 - " slgr %0,%0\n" 591 - " aghi %2,63\n" 592 - " srlg %2,%2,6\n" 593 - "0: cg %1,0(%0,%4)\n" 594 - " jne 1f\n" 595 - " la %0,8(%0)\n" 596 - " brct %2,0b\n" 597 - " lgr %0,%3\n" 598 - " j 5f\n" 599 - "1: lg %2,0(%0,%4)\n" 600 - " sllg %0,%0,3\n" 601 - " clr %2,%1\n" 602 - " jne 2f\n" 603 - " aghi %0,32\n" 604 - " srlg %2,%2,32\n" 605 - "2: lghi %1,0xff\n" 606 - " tmll %2,0xffff\n" 607 - " jno 3f\n" 608 - " aghi %0,16\n" 609 - " srl %2,16\n" 610 - "3: tmll %2,0x00ff\n" 611 - " jno 4f\n" 612 - " aghi %0,8\n" 613 - " srl %2,8\n" 614 - "4: ngr %2,%1\n" 615 - " ic %2,0(%2,%5)\n" 616 - " algr %0,%2\n" 617 - "5:" 618 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 643 + asm volatile( 644 + " lghi %1,-1\n" 645 + " lgr %2,%3\n" 646 + " slgr %0,%0\n" 647 + " aghi %2,63\n" 648 + " srlg %2,%2,6\n" 649 + "0: cg %1,0(%0,%4)\n" 650 + " jne 1f\n" 651 + " la %0,8(%0)\n" 652 + " brct %2,0b\n" 653 + " lgr %0,%3\n" 654 + " j 5f\n" 655 + "1: lg %2,0(%0,%4)\n" 656 + " sllg %0,%0,3\n" 657 + " clr %2,%1\n" 658 + " jne 2f\n" 659 + " aghi %0,32\n" 660 + " srlg %2,%2,32\n" 661 + "2: lghi %1,0xff\n" 662 + " tmll %2,0xffff\n" 663 + " jno 3f\n" 664 + " aghi %0,16\n" 665 + " srl %2,16\n" 666 + "3: tmll %2,0x00ff\n" 667 + " jno 4f\n" 668 + " aghi %0,8\n" 669 + " srl %2,8\n" 670 + "4: ngr %2,%1\n" 671 + " ic %2,0(%2,%5)\n" 672 + " algr %0,%2\n" 673 + "5:" 674 + : "=&a" (res), "=&d" (cmp), "=&a" (count) 619 675 : "a" (size), "a" (addr), "a" (&_zb_findmap), 620 - "m" (*(addrtype *) addr) : "cc" ); 676 + "m" (*(addrtype *) addr) : "cc"); 621 677 return (res < size) ? res : size; 622 678 } 623 679 ··· 630 684 631 685 if (!size) 632 686 return 0; 633 - __asm__(" slgr %1,%1\n" 634 - " lgr %2,%3\n" 635 - " slgr %0,%0\n" 636 - " aghi %2,63\n" 637 - " srlg %2,%2,6\n" 638 - "0: cg %1,0(%0,%4)\n" 639 - " jne 1f\n" 640 - " aghi %0,8\n" 641 - " brct %2,0b\n" 642 - " lgr %0,%3\n" 643 - " j 5f\n" 644 - "1: lg %2,0(%0,%4)\n" 645 - " sllg %0,%0,3\n" 646 - " clr %2,%1\n" 647 - " jne 2f\n" 648 - " aghi %0,32\n" 649 - " srlg %2,%2,32\n" 650 - "2: lghi %1,0xff\n" 651 - " tmll %2,0xffff\n" 652 - " jnz 3f\n" 653 - " aghi %0,16\n" 654 - " srl %2,16\n" 655 - "3: tmll %2,0x00ff\n" 656 - " jnz 4f\n" 657 - " aghi %0,8\n" 658 - " srl %2,8\n" 659 - "4: ngr %2,%1\n" 660 - " ic %2,0(%2,%5)\n" 661 - " algr %0,%2\n" 662 - "5:" 663 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 687 + asm volatile( 688 + " slgr %1,%1\n" 689 + " lgr %2,%3\n" 690 + " slgr %0,%0\n" 691 + " aghi %2,63\n" 692 + " srlg %2,%2,6\n" 693 + "0: cg %1,0(%0,%4)\n" 694 + " jne 1f\n" 695 + " aghi %0,8\n" 696 + " brct %2,0b\n" 697 + " lgr %0,%3\n" 698 + " j 5f\n" 699 + "1: lg %2,0(%0,%4)\n" 700 + " sllg %0,%0,3\n" 701 + " clr %2,%1\n" 702 + " jne 2f\n" 703 + " aghi %0,32\n" 704 + " srlg %2,%2,32\n" 705 + "2: lghi %1,0xff\n" 706 + " tmll %2,0xffff\n" 707 + " jnz 3f\n" 708 + " aghi %0,16\n" 709 + " srl %2,16\n" 710 + "3: tmll %2,0x00ff\n" 711 + " jnz 4f\n" 712 + " aghi %0,8\n" 713 + " srl %2,8\n" 714 + "4: ngr %2,%1\n" 715 + " ic %2,0(%2,%5)\n" 716 + " algr %0,%2\n" 717 + "5:" 718 + : "=&a" (res), "=&d" (cmp), "=&a" (count) 664 719 : "a" (size), "a" (addr), "a" (&_sb_findmap), 665 - "m" (*(addrtype *) addr) : "cc" ); 720 + "m" (*(addrtype *) addr) : "cc"); 666 721 return (res < size) ? 
res : size; 667 722 } 668 723 ··· 779 832 780 833 if (!size) 781 834 return 0; 782 - __asm__(" lhi %1,-1\n" 783 - " lr %2,%3\n" 784 - " ahi %2,31\n" 785 - " srl %2,5\n" 786 - " slr %0,%0\n" 787 - "0: cl %1,0(%0,%4)\n" 788 - " jne 1f\n" 789 - " ahi %0,4\n" 790 - " brct %2,0b\n" 791 - " lr %0,%3\n" 792 - " j 4f\n" 793 - "1: l %2,0(%0,%4)\n" 794 - " sll %0,3\n" 795 - " ahi %0,24\n" 796 - " lhi %1,0xff\n" 797 - " tmh %2,0xffff\n" 798 - " jo 2f\n" 799 - " ahi %0,-16\n" 800 - " srl %2,16\n" 801 - "2: tml %2,0xff00\n" 802 - " jo 3f\n" 803 - " ahi %0,-8\n" 804 - " srl %2,8\n" 805 - "3: nr %2,%1\n" 806 - " ic %2,0(%2,%5)\n" 807 - " alr %0,%2\n" 808 - "4:" 809 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 810 - : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 811 - "m" (*(addrtype *) vaddr) : "cc" ); 835 + asm volatile( 836 + " lhi %1,-1\n" 837 + " lr %2,%3\n" 838 + " ahi %2,31\n" 839 + " srl %2,5\n" 840 + " slr %0,%0\n" 841 + "0: cl %1,0(%0,%4)\n" 842 + " jne 1f\n" 843 + " ahi %0,4\n" 844 + " brct %2,0b\n" 845 + " lr %0,%3\n" 846 + " j 4f\n" 847 + "1: l %2,0(%0,%4)\n" 848 + " sll %0,3\n" 849 + " ahi %0,24\n" 850 + " lhi %1,0xff\n" 851 + " tmh %2,0xffff\n" 852 + " jo 2f\n" 853 + " ahi %0,-16\n" 854 + " srl %2,16\n" 855 + "2: tml %2,0xff00\n" 856 + " jo 3f\n" 857 + " ahi %0,-8\n" 858 + " srl %2,8\n" 859 + "3: nr %2,%1\n" 860 + " ic %2,0(%2,%5)\n" 861 + " alr %0,%2\n" 862 + "4:" 863 + : "=&a" (res), "=&d" (cmp), "=&a" (count) 864 + : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 865 + "m" (*(addrtype *) vaddr) : "cc"); 812 866 return (res < size) ? res : size; 813 867 } 814 868 ··· 823 875 824 876 if (!size) 825 877 return 0; 826 - __asm__(" lghi %1,-1\n" 827 - " lgr %2,%3\n" 828 - " aghi %2,63\n" 829 - " srlg %2,%2,6\n" 830 - " slgr %0,%0\n" 831 - "0: clg %1,0(%0,%4)\n" 832 - " jne 1f\n" 833 - " aghi %0,8\n" 834 - " brct %2,0b\n" 835 - " lgr %0,%3\n" 836 - " j 5f\n" 837 - "1: cl %1,0(%0,%4)\n" 838 - " jne 2f\n" 839 - " aghi %0,4\n" 840 - "2: l %2,0(%0,%4)\n" 841 - " sllg %0,%0,3\n" 842 - " aghi %0,24\n" 843 - " lghi %1,0xff\n" 844 - " tmlh %2,0xffff\n" 845 - " jo 3f\n" 846 - " aghi %0,-16\n" 847 - " srl %2,16\n" 848 - "3: tmll %2,0xff00\n" 849 - " jo 4f\n" 850 - " aghi %0,-8\n" 851 - " srl %2,8\n" 852 - "4: ngr %2,%1\n" 853 - " ic %2,0(%2,%5)\n" 854 - " algr %0,%2\n" 855 - "5:" 856 - : "=&a" (res), "=&d" (cmp), "=&a" (count) 878 + asm volatile( 879 + " lghi %1,-1\n" 880 + " lgr %2,%3\n" 881 + " aghi %2,63\n" 882 + " srlg %2,%2,6\n" 883 + " slgr %0,%0\n" 884 + "0: clg %1,0(%0,%4)\n" 885 + " jne 1f\n" 886 + " aghi %0,8\n" 887 + " brct %2,0b\n" 888 + " lgr %0,%3\n" 889 + " j 5f\n" 890 + "1: cl %1,0(%0,%4)\n" 891 + " jne 2f\n" 892 + " aghi %0,4\n" 893 + "2: l %2,0(%0,%4)\n" 894 + " sllg %0,%0,3\n" 895 + " aghi %0,24\n" 896 + " lghi %1,0xff\n" 897 + " tmlh %2,0xffff\n" 898 + " jo 3f\n" 899 + " aghi %0,-16\n" 900 + " srl %2,16\n" 901 + "3: tmll %2,0xff00\n" 902 + " jo 4f\n" 903 + " aghi %0,-8\n" 904 + " srl %2,8\n" 905 + "4: ngr %2,%1\n" 906 + " ic %2,0(%2,%5)\n" 907 + " algr %0,%2\n" 908 + "5:" 909 + : "=&a" (res), "=&d" (cmp), "=&a" (count) 857 910 : "a" (size), "a" (vaddr), "a" (&_zb_findmap), 858 - "m" (*(addrtype *) vaddr) : "cc" ); 911 + "m" (*(addrtype *) vaddr) : "cc"); 859 912 return (res < size) ? 
res : size; 860 913 } 861 914 ··· 876 927 p = addr + offset / __BITOPS_WORDSIZE; 877 928 if (bit) { 878 929 #ifndef __s390x__ 879 - asm(" ic %0,0(%1)\n" 880 - " icm %0,2,1(%1)\n" 881 - " icm %0,4,2(%1)\n" 882 - " icm %0,8,3(%1)" 883 - : "=&a" (word) : "a" (p), "m" (*p) : "cc" ); 930 + asm volatile( 931 + " ic %0,0(%1)\n" 932 + " icm %0,2,1(%1)\n" 933 + " icm %0,4,2(%1)\n" 934 + " icm %0,8,3(%1)" 935 + : "=&a" (word) : "a" (p), "m" (*p) : "cc"); 884 936 #else 885 - asm(" lrvg %0,%1" : "=a" (word) : "m" (*p) ); 937 + asm volatile( 938 + " lrvg %0,%1" 939 + : "=a" (word) : "m" (*p) ); 886 940 #endif 887 941 /* 888 942 * s390 version of ffz returns __BITOPS_WORDSIZE
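The eight-way switch bodies above collapse to a single C statement each because the byte holding bit nr is computable: bitmap bit numbers are little-endian, the machine is big-endian, and the XOR with (__BITOPS_WORDSIZE - 8) converts between the two. A sketch of the address math with worked values (assuming 32-bit words; illustrative only):

        static inline void __set_bit_sketch(unsigned long nr,
                                            volatile unsigned long *ptr)
        {
                unsigned long addr;

                /* e.g. nr = 0  -> (0 ^ 24) >> 3 = byte 3, mask 0x01
                 *      nr = 31 -> (31 ^ 24) >> 3 = byte 0, mask 0x80 */
                addr = (unsigned long) ptr + ((nr ^ (32 - 8)) >> 3);
                *(unsigned char *) addr |= 1 << (nr & 7);
        }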
+22 -28
include/asm-s390/byteorder.h
··· 14 14 #ifdef __GNUC__ 15 15 16 16 #ifdef __s390x__ 17 - static __inline__ __u64 ___arch__swab64p(const __u64 *x) 17 + static inline __u64 ___arch__swab64p(const __u64 *x) 18 18 { 19 19 __u64 result; 20 20 21 - __asm__ __volatile__ ( 22 - " lrvg %0,%1" 23 - : "=d" (result) : "m" (*x) ); 21 + asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x)); 24 22 return result; 25 23 } 26 24 27 - static __inline__ __u64 ___arch__swab64(__u64 x) 25 + static inline __u64 ___arch__swab64(__u64 x) 28 26 { 29 27 __u64 result; 30 28 31 - __asm__ __volatile__ ( 32 - " lrvgr %0,%1" 33 - : "=d" (result) : "d" (x) ); 29 + asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x)); 34 30 return result; 35 31 } 36 32 37 - static __inline__ void ___arch__swab64s(__u64 *x) 33 + static inline void ___arch__swab64s(__u64 *x) 38 34 { 39 35 *x = ___arch__swab64p(x); 40 36 } 41 37 #endif /* __s390x__ */ 42 38 43 - static __inline__ __u32 ___arch__swab32p(const __u32 *x) 39 + static inline __u32 ___arch__swab32p(const __u32 *x) 44 40 { 45 41 __u32 result; 46 42 47 - __asm__ __volatile__ ( 43 + asm volatile( 48 44 #ifndef __s390x__ 49 - " icm %0,8,3(%1)\n" 50 - " icm %0,4,2(%1)\n" 51 - " icm %0,2,1(%1)\n" 52 - " ic %0,0(%1)" 53 - : "=&d" (result) : "a" (x), "m" (*x) : "cc" ); 45 + " icm %0,8,3(%1)\n" 46 + " icm %0,4,2(%1)\n" 47 + " icm %0,2,1(%1)\n" 48 + " ic %0,0(%1)" 49 + : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 54 50 #else /* __s390x__ */ 55 - " lrv %0,%1" 56 - : "=d" (result) : "m" (*x) ); 51 + " lrv %0,%1" 52 + : "=d" (result) : "m" (*x)); 57 53 #endif /* __s390x__ */ 58 54 return result; 59 55 } 60 56 61 - static __inline__ __u32 ___arch__swab32(__u32 x) 57 + static inline __u32 ___arch__swab32(__u32 x) 62 58 { 63 59 #ifndef __s390x__ 64 60 return ___arch__swab32p(&x); 65 61 #else /* __s390x__ */ 66 62 __u32 result; 67 63 68 - __asm__ __volatile__ ( 69 - " lrvr %0,%1" 70 - : "=d" (result) : "d" (x) ); 64 + asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x)); 71 65 return result; 72 66 #endif /* __s390x__ */ 73 67 } ··· 75 81 { 76 82 __u16 result; 77 83 78 - __asm__ __volatile__ ( 84 + asm volatile( 79 85 #ifndef __s390x__ 80 - " icm %0,2,1(%1)\n" 81 - " ic %0,0(%1)\n" 82 - : "=&d" (result) : "a" (x), "m" (*x) : "cc" ); 86 + " icm %0,2,1(%1)\n" 87 + " ic %0,0(%1)\n" 88 + : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 83 89 #else /* __s390x__ */ 84 - " lrvh %0,%1" 85 - : "=d" (result) : "m" (*x) ); 90 + " lrvh %0,%1" 91 + : "=d" (result) : "m" (*x)); 86 92 #endif /* __s390x__ */ 87 93 return result; 88 94 }
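With the lrv/lrvr/lrvg forms above each swab is a single instruction on 64-bit, while the 31-bit build keeps the icm/ic byte gather. A quick value check (illustrative):

        static inline int swab32_check(void)
        {
                return ___arch__swab32(0x12345678) == 0x78563412;
        }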
+55 -121
include/asm-s390/checksum.h
··· 30 30 static inline unsigned int 31 31 csum_partial(const unsigned char * buff, int len, unsigned int sum) 32 32 { 33 - /* 34 - * Experiments with ethernet and slip connections show that buf 35 - * is aligned on either a 2-byte or 4-byte boundary. 36 - */ 37 - #ifndef __s390x__ 38 - register_pair rp; 33 + register unsigned long reg2 asm("2") = (unsigned long) buff; 34 + register unsigned long reg3 asm("3") = (unsigned long) len; 39 35 40 - rp.subreg.even = (unsigned long) buff; 41 - rp.subreg.odd = (unsigned long) len; 42 - __asm__ __volatile__ ( 43 - "0: cksm %0,%1\n" /* do checksum on longs */ 44 - " jo 0b\n" 45 - : "+&d" (sum), "+&a" (rp) : : "cc", "memory" ); 46 - #else /* __s390x__ */ 47 - __asm__ __volatile__ ( 48 - " lgr 2,%1\n" /* address in gpr 2 */ 49 - " lgfr 3,%2\n" /* length in gpr 3 */ 50 - "0: cksm %0,2\n" /* do checksum on longs */ 51 - " jo 0b\n" 52 - : "+&d" (sum) 53 - : "d" (buff), "d" (len) 54 - : "cc", "memory", "2", "3" ); 55 - #endif /* __s390x__ */ 56 - return sum; 57 - } 58 - 59 - /* 60 - * csum_partial as an inline function 61 - */ 62 - static inline unsigned int 63 - csum_partial_inline(const unsigned char * buff, int len, unsigned int sum) 64 - { 65 - #ifndef __s390x__ 66 - register_pair rp; 67 - 68 - rp.subreg.even = (unsigned long) buff; 69 - rp.subreg.odd = (unsigned long) len; 70 - __asm__ __volatile__ ( 71 - "0: cksm %0,%1\n" /* do checksum on longs */ 72 - " jo 0b\n" 73 - : "+&d" (sum), "+&a" (rp) : : "cc", "memory" ); 74 - #else /* __s390x__ */ 75 - __asm__ __volatile__ ( 76 - " lgr 2,%1\n" /* address in gpr 2 */ 77 - " lgfr 3,%2\n" /* length in gpr 3 */ 78 - "0: cksm %0,2\n" /* do checksum on longs */ 79 - " jo 0b\n" 80 - : "+&d" (sum) 81 - : "d" (buff), "d" (len) 82 - : "cc", "memory", "2", "3" ); 83 - #endif /* __s390x__ */ 36 + asm volatile( 37 + "0: cksm %0,%1\n" /* do checksum on longs */ 38 + " jo 0b\n" 39 + : "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory"); 84 40 return sum; 85 41 } 86 42 ··· 70 114 csum_partial_copy_nocheck (const char *src, char *dst, int len, unsigned int sum) 71 115 { 72 116 memcpy(dst,src,len); 73 - return csum_partial_inline(dst, len, sum); 117 + return csum_partial(dst, len, sum); 74 118 } 75 119 76 120 /* ··· 82 126 #ifndef __s390x__ 83 127 register_pair rp; 84 128 85 - __asm__ __volatile__ ( 86 - " slr %N1,%N1\n" /* %0 = H L */ 87 - " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ 88 - " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ 89 - " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ 90 - " alr %0,%1\n" /* %0 = H+L+C L+H */ 91 - " srl %0,16\n" /* %0 = H+L+C */ 92 - : "+&d" (sum), "=d" (rp) : : "cc" ); 129 + asm volatile( 130 + " slr %N1,%N1\n" /* %0 = H L */ 131 + " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ 132 + " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ 133 + " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ 134 + " alr %0,%1\n" /* %0 = H+L+C L+H */ 135 + " srl %0,16\n" /* %0 = H+L+C */ 136 + : "+&d" (sum), "=d" (rp) : : "cc"); 93 137 #else /* __s390x__ */ 94 - __asm__ __volatile__ ( 95 - " sr 3,3\n" /* %0 = H*65536 + L */ 96 - " lr 2,%0\n" /* %0 = H L, R2/R3 = H L / 0 0 */ 97 - " srdl 2,16\n" /* %0 = H L, R2/R3 = 0 H / L 0 */ 98 - " alr 2,3\n" /* %0 = H L, R2/R3 = L H / L 0 */ 99 - " alr %0,2\n" /* %0 = H+L+C L+H */ 100 - " srl %0,16\n" /* %0 = H+L+C */ 138 + asm volatile( 139 + " sr 3,3\n" /* %0 = H*65536 + L */ 140 + " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */ 141 + " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */ 142 + " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */ 143 + " alr %0,2\n" /* %0 = H+L+C L+H */ 144 + " srl 
%0,16\n" /* %0 = H+L+C */ 101 145 : "+&d" (sum) : : "cc", "2", "3"); 102 146 #endif /* __s390x__ */ 103 147 return ((unsigned short) ~sum); ··· 111 155 static inline unsigned short 112 156 ip_fast_csum(unsigned char *iph, unsigned int ihl) 113 157 { 114 - unsigned long sum; 115 - #ifndef __s390x__ 116 - register_pair rp; 117 - 118 - rp.subreg.even = (unsigned long) iph; 119 - rp.subreg.odd = (unsigned long) ihl*4; 120 - __asm__ __volatile__ ( 121 - " sr %0,%0\n" /* set sum to zero */ 122 - "0: cksm %0,%1\n" /* do checksum on longs */ 123 - " jo 0b\n" 124 - : "=&d" (sum), "+&a" (rp) : : "cc", "memory" ); 125 - #else /* __s390x__ */ 126 - __asm__ __volatile__ ( 127 - " slgr %0,%0\n" /* set sum to zero */ 128 - " lgr 2,%1\n" /* address in gpr 2 */ 129 - " lgfr 3,%2\n" /* length in gpr 3 */ 130 - "0: cksm %0,2\n" /* do checksum on ints */ 131 - " jo 0b\n" 132 - : "=&d" (sum) 133 - : "d" (iph), "d" (ihl*4) 134 - : "cc", "memory", "2", "3" ); 135 - #endif /* __s390x__ */ 136 - return csum_fold(sum); 158 + return csum_fold(csum_partial(iph, ihl*4, 0)); 137 159 } 138 160 139 161 /* ··· 124 190 unsigned int sum) 125 191 { 126 192 #ifndef __s390x__ 127 - __asm__ __volatile__ ( 128 - " alr %0,%1\n" /* sum += saddr */ 129 - " brc 12,0f\n" 130 - " ahi %0,1\n" /* add carry */ 193 + asm volatile( 194 + " alr %0,%1\n" /* sum += saddr */ 195 + " brc 12,0f\n" 196 + " ahi %0,1\n" /* add carry */ 131 197 "0:" 132 - : "+&d" (sum) : "d" (saddr) : "cc" ); 133 - __asm__ __volatile__ ( 134 - " alr %0,%1\n" /* sum += daddr */ 135 - " brc 12,1f\n" 136 - " ahi %0,1\n" /* add carry */ 198 + : "+&d" (sum) : "d" (saddr) : "cc"); 199 + asm volatile( 200 + " alr %0,%1\n" /* sum += daddr */ 201 + " brc 12,1f\n" 202 + " ahi %0,1\n" /* add carry */ 137 203 "1:" 138 - : "+&d" (sum) : "d" (daddr) : "cc" ); 139 - __asm__ __volatile__ ( 140 - " alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */ 141 - " brc 12,2f\n" 142 - " ahi %0,1\n" /* add carry */ 204 + : "+&d" (sum) : "d" (daddr) : "cc"); 205 + asm volatile( 206 + " alr %0,%1\n" /* sum += (len<<16) + (proto<<8) */ 207 + " brc 12,2f\n" 208 + " ahi %0,1\n" /* add carry */ 143 209 "2:" 144 210 : "+&d" (sum) 145 211 : "d" (((unsigned int) len<<16) + (unsigned int) proto) 146 - : "cc" ); 212 + : "cc"); 147 213 #else /* __s390x__ */ 148 - __asm__ __volatile__ ( 149 - " lgfr %0,%0\n" 150 - " algr %0,%1\n" /* sum += saddr */ 151 - " brc 12,0f\n" 152 - " aghi %0,1\n" /* add carry */ 153 - "0: algr %0,%2\n" /* sum += daddr */ 154 - " brc 12,1f\n" 155 - " aghi %0,1\n" /* add carry */ 156 - "1: algfr %0,%3\n" /* sum += (len<<16) + proto */ 157 - " brc 12,2f\n" 158 - " aghi %0,1\n" /* add carry */ 159 - "2: srlg 0,%0,32\n" 160 - " alr %0,0\n" /* fold to 32 bits */ 161 - " brc 12,3f\n" 162 - " ahi %0,1\n" /* add carry */ 163 - "3: llgfr %0,%0" 214 + asm volatile( 215 + " lgfr %0,%0\n" 216 + " algr %0,%1\n" /* sum += saddr */ 217 + " brc 12,0f\n" 218 + " aghi %0,1\n" /* add carry */ 219 + "0: algr %0,%2\n" /* sum += daddr */ 220 + " brc 12,1f\n" 221 + " aghi %0,1\n" /* add carry */ 222 + "1: algfr %0,%3\n" /* sum += (len<<16) + proto */ 223 + " brc 12,2f\n" 224 + " aghi %0,1\n" /* add carry */ 225 + "2: srlg 0,%0,32\n" 226 + " alr %0,0\n" /* fold to 32 bits */ 227 + " brc 12,3f\n" 228 + " ahi %0,1\n" /* add carry */ 229 + "3: llgfr %0,%0" 164 230 : "+&d" (sum) 165 231 : "d" (saddr), "d" (daddr), 166 232 "d" (((unsigned int) len<<16) + (unsigned int) proto) 167 - : "cc", "0" ); 233 + : "cc", "0"); 168 234 #endif /* __s390x__ */ 169 235 return sum; 170 236 }
+10 -10
include/asm-s390/ebcdic.h
··· 26 26 { 27 27 if (nr-- <= 0) 28 28 return; 29 - __asm__ __volatile__( 30 - " bras 1,1f\n" 31 - " tr 0(1,%0),0(%2)\n" 32 - "0: tr 0(256,%0),0(%2)\n" 33 - " la %0,256(%0)\n" 34 - "1: ahi %1,-256\n" 35 - " jnm 0b\n" 36 - " ex %1,0(1)" 37 - : "+&a" (addr), "+&a" (nr) 38 - : "a" (codepage) : "cc", "memory", "1" ); 29 + asm volatile( 30 + " bras 1,1f\n" 31 + " tr 0(1,%0),0(%2)\n" 32 + "0: tr 0(256,%0),0(%2)\n" 33 + " la %0,256(%0)\n" 34 + "1: ahi %1,-256\n" 35 + " jnm 0b\n" 36 + " ex %1,0(1)" 37 + : "+&a" (addr), "+&a" (nr) 38 + : "a" (codepage) : "cc", "memory", "1"); 39 39 } 40 40 41 41 #define ASCEBC(addr,nr) codepage_convert(_ascebc, addr, nr)
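The codepage_convert loop above translates 256 bytes per TR and finishes the sub-256-byte tail by EXecuting the one-byte TR template at label 1b with the residual count patched into its length field. Its effect, in plain C (an illustrative equivalent, not a replacement):

        static void codepage_convert_c(char *codepage, volatile char *addr,
                                       unsigned long nr)
        {
                /* translate nr bytes in place through the 256-byte table */
                while (nr-- > 0) {
                        *addr = codepage[(unsigned char) *addr];
                        addr++;
                }
        }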
+6 -8
include/asm-s390/io.h
··· 27 27 static inline unsigned long virt_to_phys(volatile void * address) 28 28 { 29 29 unsigned long real_address; 30 - __asm__ ( 30 + asm volatile( 31 31 #ifndef __s390x__ 32 - " lra %0,0(%1)\n" 33 - " jz 0f\n" 34 - " sr %0,%0\n" 32 + " lra %0,0(%1)\n" 35 33 #else /* __s390x__ */ 36 - " lrag %0,0(%1)\n" 37 - " jz 0f\n" 38 - " slgr %0,%0\n" 34 + " lrag %0,0(%1)\n" 39 35 #endif /* __s390x__ */ 36 + " jz 0f\n" 37 + " la %0,0\n" 40 38 "0:" 41 - : "=a" (real_address) : "a" (address) : "cc" ); 39 + : "=a" (real_address) : "a" (address) : "cc"); 42 40 return real_address; 43 41 } 44 42
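virt_to_phys above now shares its tail between the two builds: lra/lrag leave condition code 0 when a translation exists, and the failure path loads 0 with la, which behaves the same in both addressing modes (the old code needed separate sr/slgr variants). Usage sketch (the buffer name is hypothetical):

        static inline int is_mapped(volatile void *buf)
        {
                /* 0 means lra/lrag found no translation for buf */
                return virt_to_phys(buf) != 0;
        }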
+82 -32
include/asm-s390/irqflags.h
··· 10 10 11 11 #ifdef __KERNEL__ 12 12 13 - /* interrupt control.. */ 14 - #define raw_local_irq_enable() ({ \ 15 - unsigned long __dummy; \ 16 - __asm__ __volatile__ ( \ 17 - "stosm 0(%1),0x03" \ 18 - : "=m" (__dummy) : "a" (&__dummy) : "memory" ); \ 19 - }) 13 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 20 14 21 - #define raw_local_irq_disable() ({ \ 22 - unsigned long __flags; \ 23 - __asm__ __volatile__ ( \ 24 - "stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \ 25 - __flags; \ 26 - }) 27 - 28 - #define raw_local_save_flags(x) \ 29 - do { \ 30 - typecheck(unsigned long, x); \ 31 - __asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x), "m" (x) ); \ 32 - } while (0) 33 - 34 - #define raw_local_irq_restore(x) \ 35 - do { \ 36 - typecheck(unsigned long, x); \ 37 - __asm__ __volatile__("ssm 0(%0)" : : "a" (&x), "m" (x) : "memory"); \ 38 - } while (0) 39 - 40 - #define raw_irqs_disabled() \ 41 - ({ \ 42 - unsigned long flags; \ 43 - raw_local_save_flags(flags); \ 44 - !((flags >> __FLAG_SHIFT) & 3); \ 15 + /* store then or system mask. */ 16 + #define __raw_local_irq_stosm(__or) \ 17 + ({ \ 18 + unsigned long __mask; \ 19 + asm volatile( \ 20 + " stosm %0,%1" \ 21 + : "=Q" (__mask) : "i" (__or) : "memory"); \ 22 + __mask; \ 45 23 }) 24 + 25 + /* store then and system mask. */ 26 + #define __raw_local_irq_stnsm(__and) \ 27 + ({ \ 28 + unsigned long __mask; \ 29 + asm volatile( \ 30 + " stnsm %0,%1" \ 31 + : "=Q" (__mask) : "i" (__and) : "memory"); \ 32 + __mask; \ 33 + }) 34 + 35 + /* set system mask. */ 36 + #define __raw_local_irq_ssm(__mask) \ 37 + ({ \ 38 + asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ 39 + }) 40 + 41 + #else /* __GNUC__ */ 42 + 43 + /* store then or system mask. */ 44 + #define __raw_local_irq_stosm(__or) \ 45 + ({ \ 46 + unsigned long __mask; \ 47 + asm volatile( \ 48 + " stosm 0(%1),%2" \ 49 + : "=m" (__mask) \ 50 + : "a" (&__mask), "i" (__or) : "memory"); \ 51 + __mask; \ 52 + }) 53 + 54 + /* store then and system mask. */ 55 + #define __raw_local_irq_stnsm(__and) \ 56 + ({ \ 57 + unsigned long __mask; \ 58 + asm volatile( \ 59 + " stnsm 0(%1),%2" \ 60 + : "=m" (__mask) \ 61 + : "a" (&__mask), "i" (__and) : "memory"); \ 62 + __mask; \ 63 + }) 64 + 65 + /* set system mask. */ 66 + #define __raw_local_irq_ssm(__mask) \ 67 + ({ \ 68 + asm volatile( \ 69 + " ssm 0(%0)" \ 70 + : : "a" (&__mask), "m" (__mask) : "memory"); \ 71 + }) 72 + 73 + #endif /* __GNUC__ */ 74 + 75 + /* interrupt control.. */ 76 + static inline unsigned long raw_local_irq_enable(void) 77 + { 78 + return __raw_local_irq_stosm(0x03); 79 + } 80 + 81 + static inline unsigned long raw_local_irq_disable(void) 82 + { 83 + return __raw_local_irq_stnsm(0xfc); 84 + } 85 + 86 + #define raw_local_save_flags(x) \ 87 + do { \ 88 + typecheck(unsigned long, x); \ 89 + (x) = __raw_local_irq_stosm(0x00); \ 90 + } while (0) 91 + 92 + static inline void raw_local_irq_restore(unsigned long flags) 93 + { 94 + __raw_local_irq_ssm(flags); 95 + } 46 96 47 97 static inline int raw_irqs_disabled_flags(unsigned long flags) 48 98 { 49 - return !((flags >> __FLAG_SHIFT) & 3); 99 + return !(flags & (3UL << (BITS_PER_LONG - 8))); 50 100 } 51 101 52 102 /* For spinlocks etc */
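The stosm/stnsm/ssm building blocks above give the raw_local_irq_* operations one-line bodies, and the Q-constraint again removes the address register on newer gcc. A typical caller then reads (a usage sketch, not from the patch):

        static inline void example_critical_section(void)
        {
                unsigned long flags;

                flags = raw_local_irq_disable();  /* stnsm returns the old mask */
                /* ... code that must run with interrupts disabled ... */
                raw_local_irq_restore(flags);     /* ssm puts the old mask back */
        }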
+1 -1
include/asm-s390/lowcore.h
··· 359 359 360 360 static inline void set_prefix(__u32 address) 361 361 { 362 - __asm__ __volatile__ ("spx %0" : : "m" (address) : "memory" ); 362 + asm volatile("spx %0" : : "m" (address) : "memory"); 363 363 } 364 364 365 365 #define __PANIC_MAGIC 0xDEADC0DE
+33 -78
include/asm-s390/page.h
··· 22 22 #include <asm/setup.h> 23 23 #ifndef __ASSEMBLY__ 24 24 25 - #ifndef __s390x__ 26 - 27 25 static inline void clear_page(void *page) 28 26 { 29 - register_pair rp; 30 - 31 - rp.subreg.even = (unsigned long) page; 32 - rp.subreg.odd = (unsigned long) 4096; 33 - asm volatile (" slr 1,1\n" 34 - " mvcl %0,0" 35 - : "+&a" (rp) : : "memory", "cc", "1" ); 27 + register unsigned long reg1 asm ("1") = 0; 28 + register void *reg2 asm ("2") = page; 29 + register unsigned long reg3 asm ("3") = 4096; 30 + asm volatile( 31 + " mvcl 2,0" 32 + : "+d" (reg2), "+d" (reg3) : "d" (reg1) : "memory", "cc"); 36 33 } 37 34 38 35 static inline void copy_page(void *to, void *from) 39 36 { 40 - if (MACHINE_HAS_MVPG) 41 - asm volatile (" sr 0,0\n" 42 - " mvpg %0,%1" 43 - : : "a" ((void *)(to)), "a" ((void *)(from)) 44 - : "memory", "cc", "0" ); 45 - else 46 - asm volatile (" mvc 0(256,%0),0(%1)\n" 47 - " mvc 256(256,%0),256(%1)\n" 48 - " mvc 512(256,%0),512(%1)\n" 49 - " mvc 768(256,%0),768(%1)\n" 50 - " mvc 1024(256,%0),1024(%1)\n" 51 - " mvc 1280(256,%0),1280(%1)\n" 52 - " mvc 1536(256,%0),1536(%1)\n" 53 - " mvc 1792(256,%0),1792(%1)\n" 54 - " mvc 2048(256,%0),2048(%1)\n" 55 - " mvc 2304(256,%0),2304(%1)\n" 56 - " mvc 2560(256,%0),2560(%1)\n" 57 - " mvc 2816(256,%0),2816(%1)\n" 58 - " mvc 3072(256,%0),3072(%1)\n" 59 - " mvc 3328(256,%0),3328(%1)\n" 60 - " mvc 3584(256,%0),3584(%1)\n" 61 - " mvc 3840(256,%0),3840(%1)\n" 62 - : : "a"((void *)(to)),"a"((void *)(from)) 63 - : "memory" ); 37 + if (MACHINE_HAS_MVPG) { 38 + register unsigned long reg0 asm ("0") = 0; 39 + asm volatile( 40 + " mvpg %0,%1" 41 + : : "a" (to), "a" (from), "d" (reg0) 42 + : "memory", "cc"); 43 + } else 44 + asm volatile( 45 + " mvc 0(256,%0),0(%1)\n" 46 + " mvc 256(256,%0),256(%1)\n" 47 + " mvc 512(256,%0),512(%1)\n" 48 + " mvc 768(256,%0),768(%1)\n" 49 + " mvc 1024(256,%0),1024(%1)\n" 50 + " mvc 1280(256,%0),1280(%1)\n" 51 + " mvc 1536(256,%0),1536(%1)\n" 52 + " mvc 1792(256,%0),1792(%1)\n" 53 + " mvc 2048(256,%0),2048(%1)\n" 54 + " mvc 2304(256,%0),2304(%1)\n" 55 + " mvc 2560(256,%0),2560(%1)\n" 56 + " mvc 2816(256,%0),2816(%1)\n" 57 + " mvc 3072(256,%0),3072(%1)\n" 58 + " mvc 3328(256,%0),3328(%1)\n" 59 + " mvc 3584(256,%0),3584(%1)\n" 60 + " mvc 3840(256,%0),3840(%1)\n" 61 + : : "a" (to), "a" (from) : "memory"); 64 62 } 65 - 66 - #else /* __s390x__ */ 67 - 68 - static inline void clear_page(void *page) 69 - { 70 - asm volatile (" lgr 2,%0\n" 71 - " lghi 3,4096\n" 72 - " slgr 1,1\n" 73 - " mvcl 2,0" 74 - : : "a" ((void *) (page)) 75 - : "memory", "cc", "1", "2", "3" ); 76 - } 77 - 78 - static inline void copy_page(void *to, void *from) 79 - { 80 - if (MACHINE_HAS_MVPG) 81 - asm volatile (" sgr 0,0\n" 82 - " mvpg %0,%1" 83 - : : "a" ((void *)(to)), "a" ((void *)(from)) 84 - : "memory", "cc", "0" ); 85 - else 86 - asm volatile (" mvc 0(256,%0),0(%1)\n" 87 - " mvc 256(256,%0),256(%1)\n" 88 - " mvc 512(256,%0),512(%1)\n" 89 - " mvc 768(256,%0),768(%1)\n" 90 - " mvc 1024(256,%0),1024(%1)\n" 91 - " mvc 1280(256,%0),1280(%1)\n" 92 - " mvc 1536(256,%0),1536(%1)\n" 93 - " mvc 1792(256,%0),1792(%1)\n" 94 - " mvc 2048(256,%0),2048(%1)\n" 95 - " mvc 2304(256,%0),2304(%1)\n" 96 - " mvc 2560(256,%0),2560(%1)\n" 97 - " mvc 2816(256,%0),2816(%1)\n" 98 - " mvc 3072(256,%0),3072(%1)\n" 99 - " mvc 3328(256,%0),3328(%1)\n" 100 - " mvc 3584(256,%0),3584(%1)\n" 101 - " mvc 3840(256,%0),3840(%1)\n" 102 - : : "a"((void *)(to)),"a"((void *)(from)) 103 - : "memory" ); 104 - } 105 - 106 - #endif /* __s390x__ */ 107 63 108 64 #define clear_user_page(page, 
vaddr, pg) clear_page(page) 109 65 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) ··· 115 159 static inline void 116 160 page_set_storage_key(unsigned long addr, unsigned int skey) 117 161 { 118 - asm volatile ( "sske %0,%1" : : "d" (skey), "a" (addr) ); 162 + asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); 119 163 } 120 164 121 165 static inline unsigned int ··· 123 167 { 124 168 unsigned int skey; 125 169 126 - asm volatile ( "iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0) ); 127 - 170 + asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0)); 128 171 return skey; 129 172 } 130 173
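clear_page above is now a single implementation for both builds: mvcl 2,0 copies from a source pair whose length register (r1) is zero, so the entire 4096-byte destination is filled with r1's pad byte, which is also zero. Functionally it is just this (an illustrative C equivalent, assuming the kernel's memset):

        static inline void clear_page_c(void *page)
        {
                /* what the zero-length mvcl pad-fill accomplishes */
                memset(page, 0, 4096);
        }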
+15 -13
include/asm-s390/pgtable.h
··· 554 554 /* ipte in zarch mode can do the math */ 555 555 pte_t *pto = ptep; 556 556 #endif 557 - asm volatile ("ipte %2,%3" 558 - : "=m" (*ptep) : "m" (*ptep), 559 - "a" (pto), "a" (address) ); 557 + asm volatile( 558 + " ipte %2,%3" 559 + : "=m" (*ptep) : "m" (*ptep), 560 + "a" (pto), "a" (address)); 560 561 } 561 562 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 562 563 } ··· 610 609 /* 611 610 * Test and clear referenced bit in storage key. 612 611 */ 613 - #define page_test_and_clear_young(page) \ 614 - ({ \ 615 - struct page *__page = (page); \ 616 - unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \ 617 - int __ccode; \ 618 - asm volatile ("rrbe 0,%1\n\t" \ 619 - "ipm %0\n\t" \ 620 - "srl %0,28\n\t" \ 621 - : "=d" (__ccode) : "a" (__physpage) : "cc" ); \ 622 - (__ccode & 2); \ 612 + #define page_test_and_clear_young(page) \ 613 + ({ \ 614 + struct page *__page = (page); \ 615 + unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT);\ 616 + int __ccode; \ 617 + asm volatile( \ 618 + " rrbe 0,%1\n" \ 619 + " ipm %0\n" \ 620 + " srl %0,28\n" \ 621 + : "=d" (__ccode) : "a" (__physpage) : "cc"); \ 622 + (__ccode & 2); \ 623 623 }) 624 624 625 625 /*
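In page_test_and_clear_young above, rrbe resets the reference bit in the storage key and sets condition code 2 or 3 exactly when that bit was one, so after the ipm/srl extraction the macro's value is simply (__ccode & 2). Caller sketch (variable names hypothetical):

        /* non-zero iff the page was referenced since the last check;
         * the reference bit is cleared again as a side effect */
        if (page_test_and_clear_young(page))
                young++;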
+65 -65
include/asm-s390/processor.h
··· 13 13 #ifndef __ASM_S390_PROCESSOR_H 14 14 #define __ASM_S390_PROCESSOR_H 15 15 16 - #include <asm/page.h> 17 16 #include <asm/ptrace.h> 18 17 19 18 #ifdef __KERNEL__ ··· 20 21 * Default implementation of macro that returns current 21 22 * instruction pointer ("program counter"). 22 23 */ 23 - #define current_text_addr() ({ void *pc; __asm__("basr %0,0":"=a"(pc)); pc; }) 24 + #define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) 24 25 25 26 /* 26 27 * CPU type and hardware bug flags. Kept separately for each CPU. ··· 201 202 static inline void cpu_relax(void) 202 203 { 203 204 if (MACHINE_HAS_DIAG44) 204 - asm volatile ("diag 0,0,68" : : : "memory"); 205 + asm volatile("diag 0,0,68" : : : "memory"); 205 206 else 206 207 barrier(); 207 208 } ··· 212 213 static inline void __load_psw(psw_t psw) 213 214 { 214 215 #ifndef __s390x__ 215 - asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 216 + asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); 216 217 #else 217 - asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 218 + asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); 218 219 #endif 219 220 } 220 221 ··· 231 232 psw.mask = mask; 232 233 233 234 #ifndef __s390x__ 234 - asm volatile ( 235 - " basr %0,0\n" 236 - "0: ahi %0,1f-0b\n" 237 - " st %0,4(%1)\n" 238 - " lpsw 0(%1)\n" 235 + asm volatile( 236 + " basr %0,0\n" 237 + "0: ahi %0,1f-0b\n" 238 + " st %0,4(%1)\n" 239 + " lpsw 0(%1)\n" 239 240 "1:" 240 - : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" ); 241 + : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); 241 242 #else /* __s390x__ */ 242 - asm volatile ( 243 - " larl %0,1f\n" 244 - " stg %0,8(%1)\n" 245 - " lpswe 0(%1)\n" 243 + asm volatile( 244 + " larl %0,1f\n" 245 + " stg %0,8(%1)\n" 246 + " lpswe 0(%1)\n" 246 247 "1:" 247 - : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc" ); 248 + : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); 248 249 #endif /* __s390x__ */ 249 250 } 250 251 ··· 273 274 * the processor is dead afterwards 274 275 */ 275 276 #ifndef __s390x__ 276 - asm volatile (" stctl 0,0,0(%2)\n" 277 - " ni 0(%2),0xef\n" /* switch off protection */ 278 - " lctl 0,0,0(%2)\n" 279 - " stpt 0xd8\n" /* store timer */ 280 - " stckc 0xe0\n" /* store clock comparator */ 281 - " stpx 0x108\n" /* store prefix register */ 282 - " stam 0,15,0x120\n" /* store access registers */ 283 - " std 0,0x160\n" /* store f0 */ 284 - " std 2,0x168\n" /* store f2 */ 285 - " std 4,0x170\n" /* store f4 */ 286 - " std 6,0x178\n" /* store f6 */ 287 - " stm 0,15,0x180\n" /* store general registers */ 288 - " stctl 0,15,0x1c0\n" /* store control registers */ 289 - " oi 0x1c0,0x10\n" /* fake protection bit */ 290 - " lpsw 0(%1)" 291 - : "=m" (ctl_buf) 292 - : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); 277 + asm volatile( 278 + " stctl 0,0,0(%2)\n" 279 + " ni 0(%2),0xef\n" /* switch off protection */ 280 + " lctl 0,0,0(%2)\n" 281 + " stpt 0xd8\n" /* store timer */ 282 + " stckc 0xe0\n" /* store clock comparator */ 283 + " stpx 0x108\n" /* store prefix register */ 284 + " stam 0,15,0x120\n" /* store access registers */ 285 + " std 0,0x160\n" /* store f0 */ 286 + " std 2,0x168\n" /* store f2 */ 287 + " std 4,0x170\n" /* store f4 */ 288 + " std 6,0x178\n" /* store f6 */ 289 + " stm 0,15,0x180\n" /* store general registers */ 290 + " stctl 0,15,0x1c0\n" /* store control registers */ 291 + " oi 0x1c0,0x10\n" /* fake protection bit */ 292 + " lpsw 0(%1)" 293 + : "=m" (ctl_buf) 294 + : "a" (&dw_psw), "a" 
(&ctl_buf), "m" (dw_psw) : "cc"); 293 295 #else /* __s390x__ */ 294 - asm volatile (" stctg 0,0,0(%2)\n" 295 - " ni 4(%2),0xef\n" /* switch off protection */ 296 - " lctlg 0,0,0(%2)\n" 297 - " lghi 1,0x1000\n" 298 - " stpt 0x328(1)\n" /* store timer */ 299 - " stckc 0x330(1)\n" /* store clock comparator */ 300 - " stpx 0x318(1)\n" /* store prefix register */ 301 - " stam 0,15,0x340(1)\n" /* store access registers */ 302 - " stfpc 0x31c(1)\n" /* store fpu control */ 303 - " std 0,0x200(1)\n" /* store f0 */ 304 - " std 1,0x208(1)\n" /* store f1 */ 305 - " std 2,0x210(1)\n" /* store f2 */ 306 - " std 3,0x218(1)\n" /* store f3 */ 307 - " std 4,0x220(1)\n" /* store f4 */ 308 - " std 5,0x228(1)\n" /* store f5 */ 309 - " std 6,0x230(1)\n" /* store f6 */ 310 - " std 7,0x238(1)\n" /* store f7 */ 311 - " std 8,0x240(1)\n" /* store f8 */ 312 - " std 9,0x248(1)\n" /* store f9 */ 313 - " std 10,0x250(1)\n" /* store f10 */ 314 - " std 11,0x258(1)\n" /* store f11 */ 315 - " std 12,0x260(1)\n" /* store f12 */ 316 - " std 13,0x268(1)\n" /* store f13 */ 317 - " std 14,0x270(1)\n" /* store f14 */ 318 - " std 15,0x278(1)\n" /* store f15 */ 319 - " stmg 0,15,0x280(1)\n" /* store general registers */ 320 - " stctg 0,15,0x380(1)\n" /* store control registers */ 321 - " oi 0x384(1),0x10\n" /* fake protection bit */ 322 - " lpswe 0(%1)" 323 - : "=m" (ctl_buf) 324 - : "a" (&dw_psw), "a" (&ctl_buf), 325 - "m" (dw_psw) : "cc", "0", "1"); 296 + asm volatile( 297 + " stctg 0,0,0(%2)\n" 298 + " ni 4(%2),0xef\n" /* switch off protection */ 299 + " lctlg 0,0,0(%2)\n" 300 + " lghi 1,0x1000\n" 301 + " stpt 0x328(1)\n" /* store timer */ 302 + " stckc 0x330(1)\n" /* store clock comparator */ 303 + " stpx 0x318(1)\n" /* store prefix register */ 304 + " stam 0,15,0x340(1)\n"/* store access registers */ 305 + " stfpc 0x31c(1)\n" /* store fpu control */ 306 + " std 0,0x200(1)\n" /* store f0 */ 307 + " std 1,0x208(1)\n" /* store f1 */ 308 + " std 2,0x210(1)\n" /* store f2 */ 309 + " std 3,0x218(1)\n" /* store f3 */ 310 + " std 4,0x220(1)\n" /* store f4 */ 311 + " std 5,0x228(1)\n" /* store f5 */ 312 + " std 6,0x230(1)\n" /* store f6 */ 313 + " std 7,0x238(1)\n" /* store f7 */ 314 + " std 8,0x240(1)\n" /* store f8 */ 315 + " std 9,0x248(1)\n" /* store f9 */ 316 + " std 10,0x250(1)\n" /* store f10 */ 317 + " std 11,0x258(1)\n" /* store f11 */ 318 + " std 12,0x260(1)\n" /* store f12 */ 319 + " std 13,0x268(1)\n" /* store f13 */ 320 + " std 14,0x270(1)\n" /* store f14 */ 321 + " std 15,0x278(1)\n" /* store f15 */ 322 + " stmg 0,15,0x280(1)\n"/* store general registers */ 323 + " stctg 0,15,0x380(1)\n"/* store control registers */ 324 + " oi 0x384(1),0x10\n"/* fake protection bit */ 325 + " lpswe 0(%1)" 326 + : "=m" (ctl_buf) 327 + : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0"); 326 328 #endif /* __s390x__ */ 327 329 } 328 330
+1 -1
include/asm-s390/ptrace.h
··· 479 479 static inline void 480 480 psw_set_key(unsigned int key) 481 481 { 482 - asm volatile ( "spka 0(%0)" : : "d" (key) ); 482 + asm volatile("spka 0(%0)" : : "d" (key)); 483 483 } 484 484 485 485 #endif /* __ASSEMBLY__ */
+119 -119
include/asm-s390/rwsem.h
··· 122 122 { 123 123 signed long old, new; 124 124 125 - __asm__ __volatile__( 125 + asm volatile( 126 126 #ifndef __s390x__ 127 - " l %0,0(%3)\n" 128 - "0: lr %1,%0\n" 129 - " ahi %1,%5\n" 130 - " cs %0,%1,0(%3)\n" 131 - " jl 0b" 127 + " l %0,0(%3)\n" 128 + "0: lr %1,%0\n" 129 + " ahi %1,%5\n" 130 + " cs %0,%1,0(%3)\n" 131 + " jl 0b" 132 132 #else /* __s390x__ */ 133 - " lg %0,0(%3)\n" 134 - "0: lgr %1,%0\n" 135 - " aghi %1,%5\n" 136 - " csg %0,%1,0(%3)\n" 137 - " jl 0b" 133 + " lg %0,0(%3)\n" 134 + "0: lgr %1,%0\n" 135 + " aghi %1,%5\n" 136 + " csg %0,%1,0(%3)\n" 137 + " jl 0b" 138 138 #endif /* __s390x__ */ 139 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 139 + : "=&d" (old), "=&d" (new), "=m" (sem->count) 140 140 : "a" (&sem->count), "m" (sem->count), 141 - "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" ); 141 + "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); 142 142 if (old < 0) 143 143 rwsem_down_read_failed(sem); 144 144 } ··· 150 150 { 151 151 signed long old, new; 152 152 153 - __asm__ __volatile__( 153 + asm volatile( 154 154 #ifndef __s390x__ 155 - " l %0,0(%3)\n" 156 - "0: ltr %1,%0\n" 157 - " jm 1f\n" 158 - " ahi %1,%5\n" 159 - " cs %0,%1,0(%3)\n" 160 - " jl 0b\n" 155 + " l %0,0(%3)\n" 156 + "0: ltr %1,%0\n" 157 + " jm 1f\n" 158 + " ahi %1,%5\n" 159 + " cs %0,%1,0(%3)\n" 160 + " jl 0b\n" 161 161 "1:" 162 162 #else /* __s390x__ */ 163 - " lg %0,0(%3)\n" 164 - "0: ltgr %1,%0\n" 165 - " jm 1f\n" 166 - " aghi %1,%5\n" 167 - " csg %0,%1,0(%3)\n" 168 - " jl 0b\n" 163 + " lg %0,0(%3)\n" 164 + "0: ltgr %1,%0\n" 165 + " jm 1f\n" 166 + " aghi %1,%5\n" 167 + " csg %0,%1,0(%3)\n" 168 + " jl 0b\n" 169 169 "1:" 170 170 #endif /* __s390x__ */ 171 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 171 + : "=&d" (old), "=&d" (new), "=m" (sem->count) 172 172 : "a" (&sem->count), "m" (sem->count), 173 - "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" ); 173 + "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); 174 174 return old >= 0 ? 
1 : 0; 175 175 } 176 176 ··· 182 182 signed long old, new, tmp; 183 183 184 184 tmp = RWSEM_ACTIVE_WRITE_BIAS; 185 - __asm__ __volatile__( 185 + asm volatile( 186 186 #ifndef __s390x__ 187 - " l %0,0(%3)\n" 188 - "0: lr %1,%0\n" 189 - " a %1,%5\n" 190 - " cs %0,%1,0(%3)\n" 191 - " jl 0b" 187 + " l %0,0(%3)\n" 188 + "0: lr %1,%0\n" 189 + " a %1,%5\n" 190 + " cs %0,%1,0(%3)\n" 191 + " jl 0b" 192 192 #else /* __s390x__ */ 193 - " lg %0,0(%3)\n" 194 - "0: lgr %1,%0\n" 195 - " ag %1,%5\n" 196 - " csg %0,%1,0(%3)\n" 197 - " jl 0b" 193 + " lg %0,0(%3)\n" 194 + "0: lgr %1,%0\n" 195 + " ag %1,%5\n" 196 + " csg %0,%1,0(%3)\n" 197 + " jl 0b" 198 198 #endif /* __s390x__ */ 199 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 199 + : "=&d" (old), "=&d" (new), "=m" (sem->count) 200 200 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 201 - : "cc", "memory" ); 201 + : "cc", "memory"); 202 202 if (old != 0) 203 203 rwsem_down_write_failed(sem); 204 204 } ··· 215 215 { 216 216 signed long old; 217 217 218 - __asm__ __volatile__( 218 + asm volatile( 219 219 #ifndef __s390x__ 220 - " l %0,0(%2)\n" 221 - "0: ltr %0,%0\n" 222 - " jnz 1f\n" 223 - " cs %0,%4,0(%2)\n" 224 - " jl 0b\n" 220 + " l %0,0(%2)\n" 221 + "0: ltr %0,%0\n" 222 + " jnz 1f\n" 223 + " cs %0,%4,0(%2)\n" 224 + " jl 0b\n" 225 225 #else /* __s390x__ */ 226 - " lg %0,0(%2)\n" 227 - "0: ltgr %0,%0\n" 228 - " jnz 1f\n" 229 - " csg %0,%4,0(%2)\n" 230 - " jl 0b\n" 226 + " lg %0,0(%2)\n" 227 + "0: ltgr %0,%0\n" 228 + " jnz 1f\n" 229 + " csg %0,%4,0(%2)\n" 230 + " jl 0b\n" 231 231 #endif /* __s390x__ */ 232 232 "1:" 233 - : "=&d" (old), "=m" (sem->count) 233 + : "=&d" (old), "=m" (sem->count) 234 234 : "a" (&sem->count), "m" (sem->count), 235 - "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" ); 235 + "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory"); 236 236 return (old == RWSEM_UNLOCKED_VALUE) ? 
1 : 0; 237 237 } 238 238 ··· 243 243 { 244 244 signed long old, new; 245 245 246 - __asm__ __volatile__( 246 + asm volatile( 247 247 #ifndef __s390x__ 248 - " l %0,0(%3)\n" 249 - "0: lr %1,%0\n" 250 - " ahi %1,%5\n" 251 - " cs %0,%1,0(%3)\n" 252 - " jl 0b" 248 + " l %0,0(%3)\n" 249 + "0: lr %1,%0\n" 250 + " ahi %1,%5\n" 251 + " cs %0,%1,0(%3)\n" 252 + " jl 0b" 253 253 #else /* __s390x__ */ 254 - " lg %0,0(%3)\n" 255 - "0: lgr %1,%0\n" 256 - " aghi %1,%5\n" 257 - " csg %0,%1,0(%3)\n" 258 - " jl 0b" 254 + " lg %0,0(%3)\n" 255 + "0: lgr %1,%0\n" 256 + " aghi %1,%5\n" 257 + " csg %0,%1,0(%3)\n" 258 + " jl 0b" 259 259 #endif /* __s390x__ */ 260 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 260 + : "=&d" (old), "=&d" (new), "=m" (sem->count) 261 261 : "a" (&sem->count), "m" (sem->count), 262 262 "i" (-RWSEM_ACTIVE_READ_BIAS) 263 - : "cc", "memory" ); 263 + : "cc", "memory"); 264 264 if (new < 0) 265 265 if ((new & RWSEM_ACTIVE_MASK) == 0) 266 266 rwsem_wake(sem); ··· 274 274 signed long old, new, tmp; 275 275 276 276 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 277 - __asm__ __volatile__( 277 + asm volatile( 278 278 #ifndef __s390x__ 279 - " l %0,0(%3)\n" 280 - "0: lr %1,%0\n" 281 - " a %1,%5\n" 282 - " cs %0,%1,0(%3)\n" 283 - " jl 0b" 279 + " l %0,0(%3)\n" 280 + "0: lr %1,%0\n" 281 + " a %1,%5\n" 282 + " cs %0,%1,0(%3)\n" 283 + " jl 0b" 284 284 #else /* __s390x__ */ 285 - " lg %0,0(%3)\n" 286 - "0: lgr %1,%0\n" 287 - " ag %1,%5\n" 288 - " csg %0,%1,0(%3)\n" 289 - " jl 0b" 285 + " lg %0,0(%3)\n" 286 + "0: lgr %1,%0\n" 287 + " ag %1,%5\n" 288 + " csg %0,%1,0(%3)\n" 289 + " jl 0b" 290 290 #endif /* __s390x__ */ 291 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 291 + : "=&d" (old), "=&d" (new), "=m" (sem->count) 292 292 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 293 - : "cc", "memory" ); 293 + : "cc", "memory"); 294 294 if (new < 0) 295 295 if ((new & RWSEM_ACTIVE_MASK) == 0) 296 296 rwsem_wake(sem); ··· 304 304 signed long old, new, tmp; 305 305 306 306 tmp = -RWSEM_WAITING_BIAS; 307 - __asm__ __volatile__( 307 + asm volatile( 308 308 #ifndef __s390x__ 309 - " l %0,0(%3)\n" 310 - "0: lr %1,%0\n" 311 - " a %1,%5\n" 312 - " cs %0,%1,0(%3)\n" 313 - " jl 0b" 309 + " l %0,0(%3)\n" 310 + "0: lr %1,%0\n" 311 + " a %1,%5\n" 312 + " cs %0,%1,0(%3)\n" 313 + " jl 0b" 314 314 #else /* __s390x__ */ 315 - " lg %0,0(%3)\n" 316 - "0: lgr %1,%0\n" 317 - " ag %1,%5\n" 318 - " csg %0,%1,0(%3)\n" 319 - " jl 0b" 315 + " lg %0,0(%3)\n" 316 + "0: lgr %1,%0\n" 317 + " ag %1,%5\n" 318 + " csg %0,%1,0(%3)\n" 319 + " jl 0b" 320 320 #endif /* __s390x__ */ 321 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 321 + : "=&d" (old), "=&d" (new), "=m" (sem->count) 322 322 : "a" (&sem->count), "m" (sem->count), "m" (tmp) 323 - : "cc", "memory" ); 323 + : "cc", "memory"); 324 324 if (new > 1) 325 325 rwsem_downgrade_wake(sem); 326 326 } ··· 332 332 { 333 333 signed long old, new; 334 334 335 - __asm__ __volatile__( 335 + asm volatile( 336 336 #ifndef __s390x__ 337 - " l %0,0(%3)\n" 338 - "0: lr %1,%0\n" 339 - " ar %1,%5\n" 340 - " cs %0,%1,0(%3)\n" 341 - " jl 0b" 337 + " l %0,0(%3)\n" 338 + "0: lr %1,%0\n" 339 + " ar %1,%5\n" 340 + " cs %0,%1,0(%3)\n" 341 + " jl 0b" 342 342 #else /* __s390x__ */ 343 - " lg %0,0(%3)\n" 344 - "0: lgr %1,%0\n" 345 - " agr %1,%5\n" 346 - " csg %0,%1,0(%3)\n" 347 - " jl 0b" 343 + " lg %0,0(%3)\n" 344 + "0: lgr %1,%0\n" 345 + " agr %1,%5\n" 346 + " csg %0,%1,0(%3)\n" 347 + " jl 0b" 348 348 #endif /* __s390x__ */ 349 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 349 + : "=&d" (old), "=&d" (new), "=m" 
(sem->count) 350 350 : "a" (&sem->count), "m" (sem->count), "d" (delta) 351 - : "cc", "memory" ); 351 + : "cc", "memory"); 352 352 } 353 353 354 354 /* ··· 358 358 { 359 359 signed long old, new; 360 360 361 - __asm__ __volatile__( 361 + asm volatile( 362 362 #ifndef __s390x__ 363 - " l %0,0(%3)\n" 364 - "0: lr %1,%0\n" 365 - " ar %1,%5\n" 366 - " cs %0,%1,0(%3)\n" 367 - " jl 0b" 363 + " l %0,0(%3)\n" 364 + "0: lr %1,%0\n" 365 + " ar %1,%5\n" 366 + " cs %0,%1,0(%3)\n" 367 + " jl 0b" 368 368 #else /* __s390x__ */ 369 - " lg %0,0(%3)\n" 370 - "0: lgr %1,%0\n" 371 - " agr %1,%5\n" 372 - " csg %0,%1,0(%3)\n" 373 - " jl 0b" 369 + " lg %0,0(%3)\n" 370 + "0: lgr %1,%0\n" 371 + " agr %1,%5\n" 372 + " csg %0,%1,0(%3)\n" 373 + " jl 0b" 374 374 #endif /* __s390x__ */ 375 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 375 + : "=&d" (old), "=&d" (new), "=m" (sem->count) 376 376 : "a" (&sem->count), "m" (sem->count), "d" (delta) 377 - : "cc", "memory" ); 377 + : "cc", "memory"); 378 378 return new; 379 379 } 380 380
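Note: every rwsem fast path above is the same idiom, load the count, compute the new value, and retry with compare-and-swap until no other CPU raced in between. A minimal user-space sketch of that loop, assuming GCC's __sync builtins stand in for cs/csg (fast_down_read and ACTIVE_READ_BIAS are illustrative names, not kernel API):

	#include <stdio.h>

	#define ACTIVE_READ_BIAS 1L	/* stand-in for RWSEM_ACTIVE_READ_BIAS */

	static long fast_down_read(long *count)
	{
		long old, new;

		do {
			old = *count;			/* "l/lg   %0,0(%3)" */
			new = old + ACTIVE_READ_BIAS;	/* "ahi/aghi %1,%5" */
			/* "cs/csg %0,%1,0(%3)" + "jl 0b": retry on interference */
		} while (!__sync_bool_compare_and_swap(count, old, new));
		return old;	/* old < 0 would send us to the slow path */
	}

	int main(void)
	{
		long count = 0;

		if (fast_down_read(&count) < 0)
			printf("contended, slow path needed\n");
		printf("count = %ld\n", count);
		return 0;
	}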
+8 -8
include/asm-s390/semaphore.h
··· 85 85 * sem->count.counter = --new_val; 86 86 * In the ppc code this is called atomic_dec_if_positive. 87 87 */ 88 - __asm__ __volatile__ ( 89 - " l %0,0(%3)\n" 90 - "0: ltr %1,%0\n" 91 - " jle 1f\n" 92 - " ahi %1,-1\n" 93 - " cs %0,%1,0(%3)\n" 94 - " jl 0b\n" 88 + asm volatile( 89 + " l %0,0(%3)\n" 90 + "0: ltr %1,%0\n" 91 + " jle 1f\n" 92 + " ahi %1,-1\n" 93 + " cs %0,%1,0(%3)\n" 94 + " jl 0b\n" 95 95 "1:" 96 96 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter) 97 97 : "a" (&sem->count.counter), "m" (sem->count.counter) 98 - : "cc", "memory" ); 98 + : "cc", "memory"); 99 99 return old_val <= 0; 100 100 } 101 101
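The loop above is the dec-if-positive idiom the comment refers to: decrement sem->count only if the result stays non-negative, otherwise leave it alone and report failure. The same logic in portable C, again with a GCC __sync builtin in place of cs (try_down is an illustrative name):

	static int try_down(int *counter)
	{
		int old, new;

		do {
			old = *counter;		/* "l   %0,0(%3)" */
			if (old <= 0)		/* "ltr %1,%0" + "jle 1f" */
				break;		/* contended: leave count untouched */
			new = old - 1;		/* "ahi %1,-1" */
		} while (!__sync_bool_compare_and_swap(counter, old, new));
		return old <= 0;		/* non-zero: caller must sleep */
	}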
+34 -30
include/asm-s390/sfp-machine.h
··· 76 76 unsigned int __r2 = (x2) + (y2); \ 77 77 unsigned int __r1 = (x1); \ 78 78 unsigned int __r0 = (x0); \ 79 - __asm__ (" alr %2,%3\n" \ 80 - " brc 12,0f\n" \ 81 - " lhi 0,1\n" \ 82 - " alr %1,0\n" \ 83 - " brc 12,0f\n" \ 84 - " alr %0,0\n" \ 85 - "0:" \ 86 - : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ 87 - : "d" (y0), "i" (1) : "cc", "0" ); \ 88 - __asm__ (" alr %1,%2\n" \ 89 - " brc 12,0f\n" \ 90 - " ahi %0,1\n" \ 91 - "0:" \ 92 - : "+&d" (__r2), "+&d" (__r1) \ 93 - : "d" (y1) : "cc" ); \ 79 + asm volatile( \ 80 + " alr %2,%3\n" \ 81 + " brc 12,0f\n" \ 82 + " lhi 0,1\n" \ 83 + " alr %1,0\n" \ 84 + " brc 12,0f\n" \ 85 + " alr %0,0\n" \ 86 + "0:" \ 87 + : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ 88 + : "d" (y0), "i" (1) : "cc", "0" ); \ 89 + asm volatile( \ 90 + " alr %1,%2\n" \ 91 + " brc 12,0f\n" \ 92 + " ahi %0,1\n" \ 93 + "0:" \ 94 + : "+&d" (__r2), "+&d" (__r1) \ 95 + : "d" (y1) : "cc"); \ 94 96 (r2) = __r2; \ 95 97 (r1) = __r1; \ 96 98 (r0) = __r0; \ ··· 102 100 unsigned int __r2 = (x2) - (y2); \ 103 101 unsigned int __r1 = (x1); \ 104 102 unsigned int __r0 = (x0); \ 105 - __asm__ (" slr %2,%3\n" \ 106 - " brc 3,0f\n" \ 107 - " lhi 0,1\n" \ 108 - " slr %1,0\n" \ 109 - " brc 3,0f\n" \ 110 - " slr %0,0\n" \ 111 - "0:" \ 112 - : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ 113 - : "d" (y0) : "cc", "0" ); \ 114 - __asm__ (" slr %1,%2\n" \ 115 - " brc 3,0f\n" \ 116 - " ahi %0,-1\n" \ 117 - "0:" \ 118 - : "+&d" (__r2), "+&d" (__r1) \ 119 - : "d" (y1) : "cc" ); \ 103 + asm volatile( \ 104 + " slr %2,%3\n" \ 105 + " brc 3,0f\n" \ 106 + " lhi 0,1\n" \ 107 + " slr %1,0\n" \ 108 + " brc 3,0f\n" \ 109 + " slr %0,0\n" \ 110 + "0:" \ 111 + : "+&d" (__r2), "+&d" (__r1), "+&d" (__r0) \ 112 + : "d" (y0) : "cc", "0"); \ 113 + asm volatile( \ 114 + " slr %1,%2\n" \ 115 + " brc 3,0f\n" \ 116 + " ahi %0,-1\n" \ 117 + "0:" \ 118 + : "+&d" (__r2), "+&d" (__r1) \ 119 + : "d" (y1) : "cc"); \ 120 120 (r2) = __r2; \ 121 121 (r1) = __r1; \ 122 122 (r0) = __r0; \
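These macros add and subtract 96-bit fractions held in three 32-bit words; the brc branches on the condition code propagate the carry (or borrow) into the next word. The same computation in portable C, using a 64-bit intermediate to make the carry explicit (frac_add_3 is an illustrative name; word 0 is the least significant):

	#include <stdint.h>

	static void frac_add_3(uint32_t r[3], const uint32_t x[3],
			       const uint32_t y[3])
	{
		uint64_t sum;
		uint32_t carry;

		sum = (uint64_t) x[0] + y[0];		/* "alr %2,%3" */
		r[0] = (uint32_t) sum;
		carry = (uint32_t) (sum >> 32);		/* carry read from cc */

		sum = (uint64_t) x[1] + y[1] + carry;	/* "alr %1,0" + "alr %1,%2" */
		r[1] = (uint32_t) sum;
		carry = (uint32_t) (sum >> 32);

		r[2] = x[2] + y[2] + carry;		/* "alr %0,0" / "ahi %0,1" */
	}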
+30 -35
include/asm-s390/sigp.h
··· 70 70 static inline sigp_ccode 71 71 signal_processor(__u16 cpu_addr, sigp_order_code order_code) 72 72 { 73 + register unsigned long reg1 asm ("1") = 0; 73 74 sigp_ccode ccode; 74 75 75 - __asm__ __volatile__( 76 - " sr 1,1\n" /* parameter=0 in gpr 1 */ 77 - " sigp 1,%1,0(%2)\n" 78 - " ipm %0\n" 79 - " srl %0,28\n" 80 - : "=d" (ccode) 81 - : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code) 82 - : "cc" , "memory", "1" ); 76 + asm volatile( 77 + " sigp %1,%2,0(%3)\n" 78 + " ipm %0\n" 79 + " srl %0,28\n" 80 + : "=d" (ccode) 81 + : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]), 82 + "a" (order_code) : "cc" , "memory"); 83 83 return ccode; 84 84 } 85 85 ··· 87 87 * Signal processor with parameter 88 88 */ 89 89 static inline sigp_ccode 90 - signal_processor_p(__u32 parameter, __u16 cpu_addr, 91 - sigp_order_code order_code) 90 + signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code) 92 91 { 92 + register unsigned int reg1 asm ("1") = parameter; 93 93 sigp_ccode ccode; 94 - 95 - __asm__ __volatile__( 96 - " lr 1,%1\n" /* parameter in gpr 1 */ 97 - " sigp 1,%2,0(%3)\n" 98 - " ipm %0\n" 99 - " srl %0,28\n" 94 + 95 + asm volatile( 96 + " sigp %1,%2,0(%3)\n" 97 + " ipm %0\n" 98 + " srl %0,28\n" 100 99 : "=d" (ccode) 101 - : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]), 102 - "a" (order_code) 103 - : "cc" , "memory", "1" ); 100 + : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]), 101 + "a" (order_code) : "cc" , "memory"); 104 102 return ccode; 105 103 } 106 104 ··· 106 108 * Signal processor with parameter and return status 107 109 */ 108 110 static inline sigp_ccode 109 - signal_processor_ps(__u32 *statusptr, __u32 parameter, 110 - __u16 cpu_addr, sigp_order_code order_code) 111 + signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr, 112 + sigp_order_code order_code) 111 113 { 114 + register unsigned int reg1 asm ("1") = parameter; 112 115 sigp_ccode ccode; 113 - 114 - __asm__ __volatile__( 115 - " sr 2,2\n" /* clear status */ 116 - " lr 3,%2\n" /* parameter in gpr 3 */ 117 - " sigp 2,%3,0(%4)\n" 118 - " st 2,%1\n" 119 - " ipm %0\n" 120 - " srl %0,28\n" 121 - : "=d" (ccode), "=m" (*statusptr) 122 - : "d" (parameter), "d" (__cpu_logical_map[cpu_addr]), 123 - "a" (order_code) 124 - : "cc" , "memory", "2" , "3" 125 - ); 126 - return ccode; 116 + 117 + asm volatile( 118 + " sigp %1,%2,0(%3)\n" 119 + " ipm %0\n" 120 + " srl %0,28\n" 121 + : "=d" (ccode), "+d" (reg1) 122 + : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code) 123 + : "cc" , "memory"); 124 + *statusptr = reg1; 125 + return ccode; 127 126 } 128 127 129 128 #endif /* __SIGP__ */
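The sigp rewrites are a clean example of the register-asm-variable technique the changelog mentions: instead of copying the parameter into gpr 1 inside the asm body (and clobbering "1"), the C variable is bound to the register and gcc emits the load itself. A compressed sketch of the pattern, s390-only, with a harmless lr standing in for the sigp instruction (demo is an illustrative name):

	#include <stdio.h>

	static unsigned int demo(unsigned int parameter)
	{
		/* bind the C variable to gpr 1; gcc performs the load */
		register unsigned int reg1 asm("1") = parameter;
		unsigned int result;

		asm volatile("	lr	%0,%1"	/* stands in for "sigp %1,..." */
			     : "=d" (result) : "d" (reg1));
		return result;
	}

	int main(void)
	{
		printf("%u\n", demo(42));	/* prints 42 */
		return 0;
	}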
+1 -1
include/asm-s390/smp.h
··· 56 56 { 57 57 __u16 cpu_address; 58 58 59 - __asm__ ("stap %0\n" : "=m" (cpu_address)); 59 + asm volatile("stap %0" : "=m" (cpu_address)); 60 60 return cpu_address; 61 61 } 62 62
+23 -4
include/asm-s390/spinlock.h
··· 11 11 #ifndef __ASM_SPINLOCK_H 12 12 #define __ASM_SPINLOCK_H 13 13 14 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 15 + 14 16 static inline int 15 17 _raw_compare_and_swap(volatile unsigned int *lock, 16 18 unsigned int old, unsigned int new) 17 19 { 18 - asm volatile ("cs %0,%3,0(%4)" 19 - : "=d" (old), "=m" (*lock) 20 - : "0" (old), "d" (new), "a" (lock), "m" (*lock) 21 - : "cc", "memory" ); 20 + asm volatile( 21 + " cs %0,%3,%1" 22 + : "=d" (old), "=Q" (*lock) 23 + : "0" (old), "d" (new), "Q" (*lock) 24 + : "cc", "memory" ); 22 25 return old; 23 26 } 27 + 28 + #else /* __GNUC__ */ 29 + 30 + static inline int 31 + _raw_compare_and_swap(volatile unsigned int *lock, 32 + unsigned int old, unsigned int new) 33 + { 34 + asm volatile( 35 + " cs %0,%3,0(%4)" 36 + : "=d" (old), "=m" (*lock) 37 + : "0" (old), "d" (new), "a" (lock), "m" (*lock) 38 + : "cc", "memory" ); 39 + return old; 40 + } 41 + 42 + #endif /* __GNUC__ */ 24 43 25 44 /* 26 45 * Simple spin lock operations. There are two variants, one clears IRQ's
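The #if selects the "Q" constraint (a memory operand with short displacement and no index register) on gcc newer than 3.2, which is the "slightly better code" from the changelog: the compiler addresses *lock directly instead of being handed its address in an "a" register. A usage sketch of the primitive, assuming a caller that spins until the lock word goes from 0 to its owner id (sketch_spin_lock is illustrative, not the kernel's _raw_spin_lock):

	static inline void sketch_spin_lock(volatile unsigned int *lock,
					    unsigned int owner)
	{
		/* cs stores only if the lock word is still 0; otherwise it
		 * returns the current owner and we try again */
		while (_raw_compare_and_swap(lock, 0, owner) != 0)
			continue;
	}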
+31 -25
include/asm-s390/string.h
··· 60 60 register int r0 asm("0") = (char) c; 61 61 const void *ret = s + n; 62 62 63 - asm volatile ("0: srst %0,%1\n" 64 - " jo 0b\n" 65 - " jl 1f\n" 66 - " la %0,0\n" 67 - "1:" 68 - : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 63 + asm volatile( 64 + "0: srst %0,%1\n" 65 + " jo 0b\n" 66 + " jl 1f\n" 67 + " la %0,0\n" 68 + "1:" 69 + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc"); 69 70 return (void *) ret; 70 71 } 71 72 ··· 75 74 register int r0 asm("0") = (char) c; 76 75 const void *ret = s + n; 77 76 78 - asm volatile ("0: srst %0,%1\n" 79 - " jo 0b\n" 80 - : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 77 + asm volatile( 78 + "0: srst %0,%1\n" 79 + " jo 0b\n" 80 + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc"); 81 81 return (void *) ret; 82 82 } 83 83 ··· 88 86 unsigned long dummy; 89 87 char *ret = dst; 90 88 91 - asm volatile ("0: srst %0,%1\n" 92 - " jo 0b\n" 93 - "1: mvst %0,%2\n" 94 - " jo 1b" 95 - : "=&a" (dummy), "+a" (dst), "+a" (src) 96 - : "d" (r0), "0" (0) : "cc", "memory" ); 89 + asm volatile( 90 + "0: srst %0,%1\n" 91 + " jo 0b\n" 92 + "1: mvst %0,%2\n" 93 + " jo 1b" 94 + : "=&a" (dummy), "+a" (dst), "+a" (src) 95 + : "d" (r0), "0" (0) : "cc", "memory" ); 97 96 return ret; 98 97 } 99 98 ··· 103 100 register int r0 asm("0") = 0; 104 101 char *ret = dst; 105 102 106 - asm volatile ("0: mvst %0,%1\n" 107 - " jo 0b" 108 - : "+&a" (dst), "+&a" (src) : "d" (r0) 109 - : "cc", "memory" ); 103 + asm volatile( 104 + "0: mvst %0,%1\n" 105 + " jo 0b" 106 + : "+&a" (dst), "+&a" (src) : "d" (r0) 107 + : "cc", "memory"); 110 108 return ret; 111 109 } 112 110 ··· 116 112 register unsigned long r0 asm("0") = 0; 117 113 const char *tmp = s; 118 114 119 - asm volatile ("0: srst %0,%1\n" 120 - " jo 0b" 121 - : "+d" (r0), "+a" (tmp) : : "cc" ); 115 + asm volatile( 116 + "0: srst %0,%1\n" 117 + " jo 0b" 118 + : "+d" (r0), "+a" (tmp) : : "cc"); 122 119 return r0 - (unsigned long) s; 123 120 } 124 121 ··· 129 124 const char *tmp = s; 130 125 const char *end = s + n; 131 126 132 - asm volatile ("0: srst %0,%1\n" 133 - " jo 0b" 134 - : "+a" (end), "+a" (tmp) : "d" (r0) : "cc" ); 127 + asm volatile( 128 + "0: srst %0,%1\n" 129 + " jo 0b" 130 + : "+a" (end), "+a" (tmp) : "d" (r0) : "cc"); 135 131 return end - s; 136 132 } 137 133
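All of these helpers lean on srst (search string) and mvst (move string); the "jo 0b" lines resume the instruction when the CPU stops it after partial execution. What the memscan variant computes, in portable C (sketch_memscan is an illustrative name):

	#include <stddef.h>

	static void *sketch_memscan(const void *s, int c, size_t n)
	{
		const unsigned char *p = s, *end = p + n;	/* ret = s + n */

		while (p < end && *p != (unsigned char) c)	/* srst scans for r0 */
			p++;
		return (void *) p;	/* address of c, or s + n if absent */
	}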
+138 -206
include/asm-s390/system.h
··· 23 23 24 24 extern struct task_struct *__switch_to(void *, void *); 25 25 26 - #ifdef __s390x__ 27 - #define __FLAG_SHIFT 56 28 - #else /* ! __s390x__ */ 29 - #define __FLAG_SHIFT 24 30 - #endif /* ! __s390x__ */ 31 - 32 26 static inline void save_fp_regs(s390_fp_regs *fpregs) 33 27 { 34 - asm volatile ( 35 - " std 0,8(%1)\n" 36 - " std 2,24(%1)\n" 37 - " std 4,40(%1)\n" 38 - " std 6,56(%1)" 39 - : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" ); 28 + asm volatile( 29 + " std 0,8(%1)\n" 30 + " std 2,24(%1)\n" 31 + " std 4,40(%1)\n" 32 + " std 6,56(%1)" 33 + : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 40 34 if (!MACHINE_HAS_IEEE) 41 35 return; 42 36 asm volatile( 43 - " stfpc 0(%1)\n" 44 - " std 1,16(%1)\n" 45 - " std 3,32(%1)\n" 46 - " std 5,48(%1)\n" 47 - " std 7,64(%1)\n" 48 - " std 8,72(%1)\n" 49 - " std 9,80(%1)\n" 50 - " std 10,88(%1)\n" 51 - " std 11,96(%1)\n" 52 - " std 12,104(%1)\n" 53 - " std 13,112(%1)\n" 54 - " std 14,120(%1)\n" 55 - " std 15,128(%1)\n" 56 - : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory" ); 37 + " stfpc 0(%1)\n" 38 + " std 1,16(%1)\n" 39 + " std 3,32(%1)\n" 40 + " std 5,48(%1)\n" 41 + " std 7,64(%1)\n" 42 + " std 8,72(%1)\n" 43 + " std 9,80(%1)\n" 44 + " std 10,88(%1)\n" 45 + " std 11,96(%1)\n" 46 + " std 12,104(%1)\n" 47 + " std 13,112(%1)\n" 48 + " std 14,120(%1)\n" 49 + " std 15,128(%1)\n" 50 + : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 57 51 } 58 52 59 53 static inline void restore_fp_regs(s390_fp_regs *fpregs) 60 54 { 61 - asm volatile ( 62 - " ld 0,8(%0)\n" 63 - " ld 2,24(%0)\n" 64 - " ld 4,40(%0)\n" 65 - " ld 6,56(%0)" 66 - : : "a" (fpregs), "m" (*fpregs) ); 55 + asm volatile( 56 + " ld 0,8(%0)\n" 57 + " ld 2,24(%0)\n" 58 + " ld 4,40(%0)\n" 59 + " ld 6,56(%0)" 60 + : : "a" (fpregs), "m" (*fpregs)); 67 61 if (!MACHINE_HAS_IEEE) 68 62 return; 69 63 asm volatile( 70 - " lfpc 0(%0)\n" 71 - " ld 1,16(%0)\n" 72 - " ld 3,32(%0)\n" 73 - " ld 5,48(%0)\n" 74 - " ld 7,64(%0)\n" 75 - " ld 8,72(%0)\n" 76 - " ld 9,80(%0)\n" 77 - " ld 10,88(%0)\n" 78 - " ld 11,96(%0)\n" 79 - " ld 12,104(%0)\n" 80 - " ld 13,112(%0)\n" 81 - " ld 14,120(%0)\n" 82 - " ld 15,128(%0)\n" 83 - : : "a" (fpregs), "m" (*fpregs) ); 64 + " lfpc 0(%0)\n" 65 + " ld 1,16(%0)\n" 66 + " ld 3,32(%0)\n" 67 + " ld 5,48(%0)\n" 68 + " ld 7,64(%0)\n" 69 + " ld 8,72(%0)\n" 70 + " ld 9,80(%0)\n" 71 + " ld 10,88(%0)\n" 72 + " ld 11,96(%0)\n" 73 + " ld 12,104(%0)\n" 74 + " ld 13,112(%0)\n" 75 + " ld 14,120(%0)\n" 76 + " ld 15,128(%0)\n" 77 + : : "a" (fpregs), "m" (*fpregs)); 84 78 } 85 79 86 80 static inline void save_access_regs(unsigned int *acrs) 87 81 { 88 - asm volatile ("stam 0,15,0(%0)" : : "a" (acrs) : "memory" ); 82 + asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); 89 83 } 90 84 91 85 static inline void restore_access_regs(unsigned int *acrs) 92 86 { 93 - asm volatile ("lam 0,15,0(%0)" : : "a" (acrs) ); 87 + asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); 94 88 } 95 89 96 90 #define switch_to(prev,next,last) do { \ ··· 120 126 account_vtime(prev); \ 121 127 } while (0) 122 128 123 - #define nop() __asm__ __volatile__ ("nop") 129 + #define nop() asm volatile("nop") 124 130 125 131 #define xchg(ptr,x) \ 126 132 ({ \ ··· 141 147 shift = (3 ^ (addr & 3)) << 3; 142 148 addr ^= addr & 3; 143 149 asm volatile( 144 - " l %0,0(%4)\n" 145 - "0: lr 0,%0\n" 146 - " nr 0,%3\n" 147 - " or 0,%2\n" 148 - " cs %0,0,0(%4)\n" 149 - " jl 0b\n" 150 + " l %0,0(%4)\n" 151 + "0: lr 0,%0\n" 152 + " nr 0,%3\n" 153 + " or 0,%2\n" 154 + " cs %0,0,0(%4)\n" 155 + " jl 
0b\n" 150 156 : "=&d" (old), "=m" (*(int *) addr) 151 157 : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), 152 - "m" (*(int *) addr) : "memory", "cc", "0" ); 158 + "m" (*(int *) addr) : "memory", "cc", "0"); 153 159 x = old >> shift; 154 160 break; 155 161 case 2: ··· 157 163 shift = (2 ^ (addr & 2)) << 3; 158 164 addr ^= addr & 2; 159 165 asm volatile( 160 - " l %0,0(%4)\n" 161 - "0: lr 0,%0\n" 162 - " nr 0,%3\n" 163 - " or 0,%2\n" 164 - " cs %0,0,0(%4)\n" 165 - " jl 0b\n" 166 + " l %0,0(%4)\n" 167 + "0: lr 0,%0\n" 168 + " nr 0,%3\n" 169 + " or 0,%2\n" 170 + " cs %0,0,0(%4)\n" 171 + " jl 0b\n" 166 172 : "=&d" (old), "=m" (*(int *) addr) 167 173 : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), 168 - "m" (*(int *) addr) : "memory", "cc", "0" ); 174 + "m" (*(int *) addr) : "memory", "cc", "0"); 169 175 x = old >> shift; 170 176 break; 171 177 case 4: 172 - asm volatile ( 173 - " l %0,0(%3)\n" 174 - "0: cs %0,%2,0(%3)\n" 175 - " jl 0b\n" 178 + asm volatile( 179 + " l %0,0(%3)\n" 180 + "0: cs %0,%2,0(%3)\n" 181 + " jl 0b\n" 176 182 : "=&d" (old), "=m" (*(int *) ptr) 177 183 : "d" (x), "a" (ptr), "m" (*(int *) ptr) 178 - : "memory", "cc" ); 184 + : "memory", "cc"); 179 185 x = old; 180 186 break; 181 187 #ifdef __s390x__ 182 188 case 8: 183 - asm volatile ( 184 - " lg %0,0(%3)\n" 185 - "0: csg %0,%2,0(%3)\n" 186 - " jl 0b\n" 189 + asm volatile( 190 + " lg %0,0(%3)\n" 191 + "0: csg %0,%2,0(%3)\n" 192 + " jl 0b\n" 187 193 : "=&d" (old), "=m" (*(long *) ptr) 188 194 : "d" (x), "a" (ptr), "m" (*(long *) ptr) 189 - : "memory", "cc" ); 195 + : "memory", "cc"); 190 196 x = old; 191 197 break; 192 198 #endif /* __s390x__ */ ··· 218 224 shift = (3 ^ (addr & 3)) << 3; 219 225 addr ^= addr & 3; 220 226 asm volatile( 221 - " l %0,0(%4)\n" 222 - "0: nr %0,%5\n" 223 - " lr %1,%0\n" 224 - " or %0,%2\n" 225 - " or %1,%3\n" 226 - " cs %0,%1,0(%4)\n" 227 - " jnl 1f\n" 228 - " xr %1,%0\n" 229 - " nr %1,%5\n" 230 - " jnz 0b\n" 227 + " l %0,0(%4)\n" 228 + "0: nr %0,%5\n" 229 + " lr %1,%0\n" 230 + " or %0,%2\n" 231 + " or %1,%3\n" 232 + " cs %0,%1,0(%4)\n" 233 + " jnl 1f\n" 234 + " xr %1,%0\n" 235 + " nr %1,%5\n" 236 + " jnz 0b\n" 231 237 "1:" 232 238 : "=&d" (prev), "=&d" (tmp) 233 239 : "d" (old << shift), "d" (new << shift), "a" (ptr), 234 240 "d" (~(255 << shift)) 235 - : "memory", "cc" ); 241 + : "memory", "cc"); 236 242 return prev >> shift; 237 243 case 2: 238 244 addr = (unsigned long) ptr; 239 245 shift = (2 ^ (addr & 2)) << 3; 240 246 addr ^= addr & 2; 241 247 asm volatile( 242 - " l %0,0(%4)\n" 243 - "0: nr %0,%5\n" 244 - " lr %1,%0\n" 245 - " or %0,%2\n" 246 - " or %1,%3\n" 247 - " cs %0,%1,0(%4)\n" 248 - " jnl 1f\n" 249 - " xr %1,%0\n" 250 - " nr %1,%5\n" 251 - " jnz 0b\n" 248 + " l %0,0(%4)\n" 249 + "0: nr %0,%5\n" 250 + " lr %1,%0\n" 251 + " or %0,%2\n" 252 + " or %1,%3\n" 253 + " cs %0,%1,0(%4)\n" 254 + " jnl 1f\n" 255 + " xr %1,%0\n" 256 + " nr %1,%5\n" 257 + " jnz 0b\n" 252 258 "1:" 253 259 : "=&d" (prev), "=&d" (tmp) 254 260 : "d" (old << shift), "d" (new << shift), "a" (ptr), 255 261 "d" (~(65535 << shift)) 256 - : "memory", "cc" ); 262 + : "memory", "cc"); 257 263 return prev >> shift; 258 264 case 4: 259 - asm volatile ( 260 - " cs %0,%2,0(%3)\n" 265 + asm volatile( 266 + " cs %0,%2,0(%3)\n" 261 267 : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 262 - : "memory", "cc" ); 268 + : "memory", "cc"); 263 269 return prev; 264 270 #ifdef __s390x__ 265 271 case 8: 266 - asm volatile ( 267 - " csg %0,%2,0(%3)\n" 272 + asm volatile( 273 + " csg %0,%2,0(%3)\n" 268 274 : "=&d" (prev) : "0" 
(old), "d" (new), "a" (ptr) 269 - : "memory", "cc" ); 275 + : "memory", "cc"); 270 276 return prev; 271 277 #endif /* __s390x__ */ 272 278 } ··· 283 289 * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ). 284 290 */ 285 291 286 - #define eieio() __asm__ __volatile__ ( "bcr 15,0" : : : "memory" ) 287 - # define SYNC_OTHER_CORES(x) eieio() 292 + #define eieio() asm volatile("bcr 15,0" : : : "memory") 293 + #define SYNC_OTHER_CORES(x) eieio() 288 294 #define mb() eieio() 289 295 #define rmb() eieio() 290 296 #define wmb() eieio() ··· 301 307 302 308 #ifdef __s390x__ 303 309 304 - #define __ctl_load(array, low, high) ({ \ 305 - typedef struct { char _[sizeof(array)]; } addrtype; \ 306 - __asm__ __volatile__ ( \ 307 - " bras 1,0f\n" \ 308 - " lctlg 0,0,0(%0)\n" \ 309 - "0: ex %1,0(1)" \ 310 - : : "a" (&array), "a" (((low)<<4)+(high)), \ 311 - "m" (*(addrtype *)(array)) : "1" ); \ 310 + #define __ctl_load(array, low, high) ({ \ 311 + typedef struct { char _[sizeof(array)]; } addrtype; \ 312 + asm volatile( \ 313 + " lctlg %1,%2,0(%0)\n" \ 314 + : : "a" (&array), "i" (low), "i" (high), \ 315 + "m" (*(addrtype *)(array))); \ 312 316 }) 313 317 314 - #define __ctl_store(array, low, high) ({ \ 315 - typedef struct { char _[sizeof(array)]; } addrtype; \ 316 - __asm__ __volatile__ ( \ 317 - " bras 1,0f\n" \ 318 - " stctg 0,0,0(%1)\n" \ 319 - "0: ex %2,0(1)" \ 320 - : "=m" (*(addrtype *)(array)) \ 321 - : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \ 318 + #define __ctl_store(array, low, high) ({ \ 319 + typedef struct { char _[sizeof(array)]; } addrtype; \ 320 + asm volatile( \ 321 + " stctg %2,%3,0(%1)\n" \ 322 + : "=m" (*(addrtype *)(array)) \ 323 + : "a" (&array), "i" (low), "i" (high)); \ 322 324 }) 323 - 324 - #define __ctl_set_bit(cr, bit) ({ \ 325 - __u8 __dummy[24]; \ 326 - __asm__ __volatile__ ( \ 327 - " bras 1,0f\n" /* skip indirect insns */ \ 328 - " stctg 0,0,0(%1)\n" \ 329 - " lctlg 0,0,0(%1)\n" \ 330 - "0: ex %2,0(1)\n" /* execute stctl */ \ 331 - " lg 0,0(%1)\n" \ 332 - " ogr 0,%3\n" /* set the bit */ \ 333 - " stg 0,0(%1)\n" \ 334 - "1: ex %2,6(1)" /* execute lctl */ \ 335 - : "=m" (__dummy) \ 336 - : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \ 337 - "a" (cr*17), "a" (1L<<(bit)) \ 338 - : "cc", "0", "1" ); \ 339 - }) 340 - 341 - #define __ctl_clear_bit(cr, bit) ({ \ 342 - __u8 __dummy[16]; \ 343 - __asm__ __volatile__ ( \ 344 - " bras 1,0f\n" /* skip indirect insns */ \ 345 - " stctg 0,0,0(%1)\n" \ 346 - " lctlg 0,0,0(%1)\n" \ 347 - "0: ex %2,0(1)\n" /* execute stctl */ \ 348 - " lg 0,0(%1)\n" \ 349 - " ngr 0,%3\n" /* set the bit */ \ 350 - " stg 0,0(%1)\n" \ 351 - "1: ex %2,6(1)" /* execute lctl */ \ 352 - : "=m" (__dummy) \ 353 - : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \ 354 - "a" (cr*17), "a" (~(1L<<(bit))) \ 355 - : "cc", "0", "1" ); \ 356 - }) 357 325 358 326 #else /* __s390x__ */ 359 327 360 - #define __ctl_load(array, low, high) ({ \ 361 - typedef struct { char _[sizeof(array)]; } addrtype; \ 362 - __asm__ __volatile__ ( \ 363 - " bras 1,0f\n" \ 364 - " lctl 0,0,0(%0)\n" \ 365 - "0: ex %1,0(1)" \ 366 - : : "a" (&array), "a" (((low)<<4)+(high)), \ 367 - "m" (*(addrtype *)(array)) : "1" ); \ 328 + #define __ctl_load(array, low, high) ({ \ 329 + typedef struct { char _[sizeof(array)]; } addrtype; \ 330 + asm volatile( \ 331 + " lctl %1,%2,0(%0)\n" \ 332 + : : "a" (&array), "i" (low), "i" (high), \ 333 + "m" (*(addrtype *)(array))); \ 334 + }) 335 + 336 + #define __ctl_store(array, low, high) ({ \ 337 + typedef struct { char _[sizeof(array)]; } 
addrtype; \ 338 + asm volatile( \ 339 + " stctl %2,%3,0(%1)\n" \ 340 + : "=m" (*(addrtype *)(array)) \ 341 + : "a" (&array), "i" (low), "i" (high)); \ 368 342 }) 369 343 370 - #define __ctl_store(array, low, high) ({ \ 371 - typedef struct { char _[sizeof(array)]; } addrtype; \ 372 - __asm__ __volatile__ ( \ 373 - " bras 1,0f\n" \ 374 - " stctl 0,0,0(%1)\n" \ 375 - "0: ex %2,0(1)" \ 376 - : "=m" (*(addrtype *)(array)) \ 377 - : "a" (&array), "a" (((low)<<4)+(high)): "1" ); \ 378 - }) 379 - 380 - #define __ctl_set_bit(cr, bit) ({ \ 381 - __u8 __dummy[16]; \ 382 - __asm__ __volatile__ ( \ 383 - " bras 1,0f\n" /* skip indirect insns */ \ 384 - " stctl 0,0,0(%1)\n" \ 385 - " lctl 0,0,0(%1)\n" \ 386 - "0: ex %2,0(1)\n" /* execute stctl */ \ 387 - " l 0,0(%1)\n" \ 388 - " or 0,%3\n" /* set the bit */ \ 389 - " st 0,0(%1)\n" \ 390 - "1: ex %2,4(1)" /* execute lctl */ \ 391 - : "=m" (__dummy) \ 392 - : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \ 393 - "a" (cr*17), "a" (1<<(bit)) \ 394 - : "cc", "0", "1" ); \ 395 - }) 396 - 397 - #define __ctl_clear_bit(cr, bit) ({ \ 398 - __u8 __dummy[16]; \ 399 - __asm__ __volatile__ ( \ 400 - " bras 1,0f\n" /* skip indirect insns */ \ 401 - " stctl 0,0,0(%1)\n" \ 402 - " lctl 0,0,0(%1)\n" \ 403 - "0: ex %2,0(1)\n" /* execute stctl */ \ 404 - " l 0,0(%1)\n" \ 405 - " nr 0,%3\n" /* set the bit */ \ 406 - " st 0,0(%1)\n" \ 407 - "1: ex %2,4(1)" /* execute lctl */ \ 408 - : "=m" (__dummy) \ 409 - : "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \ 410 - "a" (cr*17), "a" (~(1<<(bit))) \ 411 - : "cc", "0", "1" ); \ 412 - }) 413 344 #endif /* __s390x__ */ 345 + 346 + #define __ctl_set_bit(cr, bit) ({ \ 347 + unsigned long __dummy; \ 348 + __ctl_store(__dummy, cr, cr); \ 349 + __dummy |= 1UL << (bit); \ 350 + __ctl_load(__dummy, cr, cr); \ 351 + }) 352 + 353 + #define __ctl_clear_bit(cr, bit) ({ \ 354 + unsigned long __dummy; \ 355 + __ctl_store(__dummy, cr, cr); \ 356 + __dummy &= ~(1UL << (bit)); \ 357 + __ctl_load(__dummy, cr, cr); \ 358 + }) 414 359 415 360 #include <linux/irqflags.h> 416 361 ··· 360 427 static inline void 361 428 __set_psw_mask(unsigned long mask) 362 429 { 363 - local_save_flags(mask); 364 - __load_psw_mask(mask); 430 + __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); 365 431 } 366 432 367 433 #define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS)
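The biggest simplification in this file: __ctl_set_bit/__ctl_clear_bit no longer need the hand-built execute (ex) trampolines, because __ctl_load/__ctl_store now take the control-register range as immediates ("i" constraints), so the read-modify-write happens in plain C between the two macros. A user-space sketch of that structure, with an ordinary variable standing in for the control register (all sketch_* names are illustrative):

	#include <stdio.h>

	static unsigned long fake_cr;			/* stands in for a control register */
	#define sketch_ctl_store(x)	((x) = fake_cr)	/* like __ctl_store(x, cr, cr) */
	#define sketch_ctl_load(x)	(fake_cr = (x))	/* like __ctl_load(x, cr, cr) */

	static void sketch_ctl_set_bit(int bit)
	{
		unsigned long dummy;

		sketch_ctl_store(dummy);	/* read the register into a copy */
		dummy |= 1UL << bit;		/* modify the copy in plain C */
		sketch_ctl_load(dummy);		/* write the copy back */
	}

	int main(void)
	{
		sketch_ctl_set_bit(4);
		printf("cr = %#lx\n", fake_cr);	/* prints 0x10 */
		return 0;
	}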
+10 -9
include/asm-s390/timex.h
··· 15 15 16 16 typedef unsigned long long cycles_t; 17 17 18 - static inline cycles_t get_cycles(void) 19 - { 20 - cycles_t cycles; 21 - 22 - __asm__ __volatile__ ("stck 0(%1)" : "=m" (cycles) : "a" (&cycles) : "cc"); 23 - return cycles >> 2; 24 - } 25 - 26 18 static inline unsigned long long get_clock (void) 27 19 { 28 20 unsigned long long clk; 29 21 30 - __asm__ __volatile__ ("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 22 + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 23 + asm volatile("stck %0" : "=Q" (clk) : : "cc"); 24 + #else /* __GNUC__ */ 25 + asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 26 + #endif /* __GNUC__ */ 31 27 return clk; 28 + } 29 + 30 + static inline cycles_t get_cycles(void) 31 + { 32 + return (cycles_t) get_clock() >> 2; 32 33 } 33 34 34 35 #endif
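get_cycles() is now derived from get_clock() instead of issuing a second stck. Since bit 51 of the TOD clock increments once per microsecond, other units fall out of the same value; a sketch assuming get_clock() as defined above (tod_to_usecs is an illustrative helper, s390 only):

	static inline unsigned long long tod_to_usecs(void)
	{
		/* TOD bit 51 ticks every microsecond, so shift right by 12 */
		return get_clock() >> 12;
	}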
+15 -15
include/asm-s390/tlbflush.h
··· 25 25 */ 26 26 27 27 #define local_flush_tlb() \ 28 - do { __asm__ __volatile__("ptlb": : :"memory"); } while (0) 28 + do { asm volatile("ptlb": : :"memory"); } while (0) 29 29 30 30 #ifndef CONFIG_SMP 31 31 ··· 68 68 69 69 static inline void global_flush_tlb(void) 70 70 { 71 + register unsigned long reg2 asm("2"); 72 + register unsigned long reg3 asm("3"); 73 + register unsigned long reg4 asm("4"); 74 + long dummy; 75 + 71 76 #ifndef __s390x__ 72 77 if (!MACHINE_HAS_CSP) { 73 78 smp_ptlb_all(); 74 79 return; 75 80 } 76 81 #endif /* __s390x__ */ 77 - { 78 - register unsigned long addr asm("4"); 79 - long dummy; 80 82 81 - dummy = 0; 82 - addr = ((unsigned long) &dummy) + 1; 83 - __asm__ __volatile__ ( 84 - " slr 2,2\n" 85 - " slr 3,3\n" 86 - " csp 2,%0" 87 - : : "a" (addr), "m" (dummy) : "cc", "2", "3" ); 88 - } 83 + dummy = 0; 84 + reg2 = reg3 = 0; 85 + reg4 = ((unsigned long) &dummy) + 1; 86 + asm volatile( 87 + " csp %0,%2" 88 + : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); 89 89 } 90 90 91 91 /* ··· 102 102 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 103 103 return; 104 104 if (MACHINE_HAS_IDTE) { 105 - asm volatile (".insn rrf,0xb98e0000,0,%0,%1,0" 106 - : : "a" (2048), 107 - "a" (__pa(mm->pgd)&PAGE_MASK) : "cc" ); 105 + asm volatile( 106 + " .insn rrf,0xb98e0000,0,%0,%1,0" 107 + : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc"); 108 108 return; 109 109 } 110 110 preempt_disable();
+1 -12
include/asm-s390/uaccess.h
··· 38 38 #define get_ds() (KERNEL_DS) 39 39 #define get_fs() (current->thread.mm_segment) 40 40 41 - #ifdef __s390x__ 42 41 #define set_fs(x) \ 43 42 ({ \ 44 43 unsigned long __pto; \ 45 44 current->thread.mm_segment = (x); \ 46 45 __pto = current->thread.mm_segment.ar4 ? \ 47 46 S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ 48 - asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \ 47 + __ctl_load(__pto, 7, 7); \ 49 48 }) 50 - #else /* __s390x__ */ 51 - #define set_fs(x) \ 52 - ({ \ 53 - unsigned long __pto; \ 54 - current->thread.mm_segment = (x); \ 55 - __pto = current->thread.mm_segment.ar4 ? \ 56 - S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ 57 - asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \ 58 - }) 59 - #endif /* __s390x__ */ 60 49 61 50 #define segment_eq(a,b) ((a).ar4 == (b).ar4) 62 51
+129 -129
include/asm-s390/unistd.h
··· 355 355 356 356 #define _svc_clobber "1", "cc", "memory" 357 357 358 - #define _syscall0(type,name) \ 359 - type name(void) { \ 360 - register long __svcres asm("2"); \ 361 - long __res; \ 362 - __asm__ __volatile__ ( \ 363 - " .if %1 < 256\n" \ 364 - " svc %b1\n" \ 365 - " .else\n" \ 366 - " la %%r1,%1\n" \ 367 - " svc 0\n" \ 368 - " .endif" \ 369 - : "=d" (__svcres) \ 370 - : "i" (__NR_##name) \ 371 - : _svc_clobber ); \ 372 - __res = __svcres; \ 373 - __syscall_return(type,__res); \ 358 + #define _syscall0(type,name) \ 359 + type name(void) { \ 360 + register long __svcres asm("2"); \ 361 + long __res; \ 362 + asm volatile( \ 363 + " .if %1 < 256\n" \ 364 + " svc %b1\n" \ 365 + " .else\n" \ 366 + " la %%r1,%1\n" \ 367 + " svc 0\n" \ 368 + " .endif" \ 369 + : "=d" (__svcres) \ 370 + : "i" (__NR_##name) \ 371 + : _svc_clobber); \ 372 + __res = __svcres; \ 373 + __syscall_return(type,__res); \ 374 374 } 375 375 376 - #define _syscall1(type,name,type1,arg1) \ 377 - type name(type1 arg1) { \ 378 - register type1 __arg1 asm("2") = arg1; \ 379 - register long __svcres asm("2"); \ 380 - long __res; \ 381 - __asm__ __volatile__ ( \ 382 - " .if %1 < 256\n" \ 383 - " svc %b1\n" \ 384 - " .else\n" \ 385 - " la %%r1,%1\n" \ 386 - " svc 0\n" \ 387 - " .endif" \ 388 - : "=d" (__svcres) \ 389 - : "i" (__NR_##name), \ 390 - "0" (__arg1) \ 391 - : _svc_clobber ); \ 392 - __res = __svcres; \ 393 - __syscall_return(type,__res); \ 376 + #define _syscall1(type,name,type1,arg1) \ 377 + type name(type1 arg1) { \ 378 + register type1 __arg1 asm("2") = arg1; \ 379 + register long __svcres asm("2"); \ 380 + long __res; \ 381 + asm volatile( \ 382 + " .if %1 < 256\n" \ 383 + " svc %b1\n" \ 384 + " .else\n" \ 385 + " la %%r1,%1\n" \ 386 + " svc 0\n" \ 387 + " .endif" \ 388 + : "=d" (__svcres) \ 389 + : "i" (__NR_##name), \ 390 + "0" (__arg1) \ 391 + : _svc_clobber); \ 392 + __res = __svcres; \ 393 + __syscall_return(type,__res); \ 394 394 } 395 395 396 - #define _syscall2(type,name,type1,arg1,type2,arg2) \ 397 - type name(type1 arg1, type2 arg2) { \ 398 - register type1 __arg1 asm("2") = arg1; \ 399 - register type2 __arg2 asm("3") = arg2; \ 400 - register long __svcres asm("2"); \ 401 - long __res; \ 402 - __asm__ __volatile__ ( \ 403 - " .if %1 < 256\n" \ 404 - " svc %b1\n" \ 405 - " .else\n" \ 406 - " la %%r1,%1\n" \ 407 - " svc 0\n" \ 408 - " .endif" \ 409 - : "=d" (__svcres) \ 410 - : "i" (__NR_##name), \ 411 - "0" (__arg1), \ 412 - "d" (__arg2) \ 413 - : _svc_clobber ); \ 414 - __res = __svcres; \ 415 - __syscall_return(type,__res); \ 396 + #define _syscall2(type,name,type1,arg1,type2,arg2) \ 397 + type name(type1 arg1, type2 arg2) { \ 398 + register type1 __arg1 asm("2") = arg1; \ 399 + register type2 __arg2 asm("3") = arg2; \ 400 + register long __svcres asm("2"); \ 401 + long __res; \ 402 + asm volatile( \ 403 + " .if %1 < 256\n" \ 404 + " svc %b1\n" \ 405 + " .else\n" \ 406 + " la %%r1,%1\n" \ 407 + " svc 0\n" \ 408 + " .endif" \ 409 + : "=d" (__svcres) \ 410 + : "i" (__NR_##name), \ 411 + "0" (__arg1), \ 412 + "d" (__arg2) \ 413 + : _svc_clobber ); \ 414 + __res = __svcres; \ 415 + __syscall_return(type,__res); \ 416 416 } 417 417 418 - #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)\ 419 - type name(type1 arg1, type2 arg2, type3 arg3) { \ 420 - register type1 __arg1 asm("2") = arg1; \ 421 - register type2 __arg2 asm("3") = arg2; \ 422 - register type3 __arg3 asm("4") = arg3; \ 423 - register long __svcres asm("2"); \ 424 - long __res; \ 425 - __asm__ __volatile__ ( \ 426 - " .if %1 < 
256\n" \ 427 - " svc %b1\n" \ 428 - " .else\n" \ 429 - " la %%r1,%1\n" \ 430 - " svc 0\n" \ 431 - " .endif" \ 432 - : "=d" (__svcres) \ 433 - : "i" (__NR_##name), \ 434 - "0" (__arg1), \ 435 - "d" (__arg2), \ 436 - "d" (__arg3) \ 437 - : _svc_clobber ); \ 438 - __res = __svcres; \ 439 - __syscall_return(type,__res); \ 418 + #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ 419 + type name(type1 arg1, type2 arg2, type3 arg3) { \ 420 + register type1 __arg1 asm("2") = arg1; \ 421 + register type2 __arg2 asm("3") = arg2; \ 422 + register type3 __arg3 asm("4") = arg3; \ 423 + register long __svcres asm("2"); \ 424 + long __res; \ 425 + asm volatile( \ 426 + " .if %1 < 256\n" \ 427 + " svc %b1\n" \ 428 + " .else\n" \ 429 + " la %%r1,%1\n" \ 430 + " svc 0\n" \ 431 + " .endif" \ 432 + : "=d" (__svcres) \ 433 + : "i" (__NR_##name), \ 434 + "0" (__arg1), \ 435 + "d" (__arg2), \ 436 + "d" (__arg3) \ 437 + : _svc_clobber); \ 438 + __res = __svcres; \ 439 + __syscall_return(type,__res); \ 440 440 } 441 441 442 - #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,\ 443 - type4,name4) \ 444 - type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 445 - register type1 __arg1 asm("2") = arg1; \ 446 - register type2 __arg2 asm("3") = arg2; \ 447 - register type3 __arg3 asm("4") = arg3; \ 448 - register type4 __arg4 asm("5") = arg4; \ 449 - register long __svcres asm("2"); \ 450 - long __res; \ 451 - __asm__ __volatile__ ( \ 452 - " .if %1 < 256\n" \ 453 - " svc %b1\n" \ 454 - " .else\n" \ 455 - " la %%r1,%1\n" \ 456 - " svc 0\n" \ 457 - " .endif" \ 458 - : "=d" (__svcres) \ 459 - : "i" (__NR_##name), \ 460 - "0" (__arg1), \ 461 - "d" (__arg2), \ 462 - "d" (__arg3), \ 463 - "d" (__arg4) \ 464 - : _svc_clobber ); \ 465 - __res = __svcres; \ 466 - __syscall_return(type,__res); \ 442 + #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3, \ 443 + type4,name4) \ 444 + type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ 445 + register type1 __arg1 asm("2") = arg1; \ 446 + register type2 __arg2 asm("3") = arg2; \ 447 + register type3 __arg3 asm("4") = arg3; \ 448 + register type4 __arg4 asm("5") = arg4; \ 449 + register long __svcres asm("2"); \ 450 + long __res; \ 451 + asm volatile( \ 452 + " .if %1 < 256\n" \ 453 + " svc %b1\n" \ 454 + " .else\n" \ 455 + " la %%r1,%1\n" \ 456 + " svc 0\n" \ 457 + " .endif" \ 458 + : "=d" (__svcres) \ 459 + : "i" (__NR_##name), \ 460 + "0" (__arg1), \ 461 + "d" (__arg2), \ 462 + "d" (__arg3), \ 463 + "d" (__arg4) \ 464 + : _svc_clobber); \ 465 + __res = __svcres; \ 466 + __syscall_return(type,__res); \ 467 467 } 468 468 469 - #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,\ 470 - type4,name4,type5,name5) \ 471 - type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 472 - type5 arg5) { \ 473 - register type1 __arg1 asm("2") = arg1; \ 474 - register type2 __arg2 asm("3") = arg2; \ 475 - register type3 __arg3 asm("4") = arg3; \ 476 - register type4 __arg4 asm("5") = arg4; \ 477 - register type5 __arg5 asm("6") = arg5; \ 478 - register long __svcres asm("2"); \ 479 - long __res; \ 480 - __asm__ __volatile__ ( \ 481 - " .if %1 < 256\n" \ 482 - " svc %b1\n" \ 483 - " .else\n" \ 484 - " la %%r1,%1\n" \ 485 - " svc 0\n" \ 486 - " .endif" \ 487 - : "=d" (__svcres) \ 488 - : "i" (__NR_##name), \ 489 - "0" (__arg1), \ 490 - "d" (__arg2), \ 491 - "d" (__arg3), \ 492 - "d" (__arg4), \ 493 - "d" (__arg5) \ 494 - : _svc_clobber ); \ 495 - __res = __svcres; \ 496 - __syscall_return(type,__res); \ 469 + #define 
_syscall5(type,name,type1,arg1,type2,arg2,type3,arg3, \ 470 + type4,name4,type5,name5) \ 471 + type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ 472 + type5 arg5) { \ 473 + register type1 __arg1 asm("2") = arg1; \ 474 + register type2 __arg2 asm("3") = arg2; \ 475 + register type3 __arg3 asm("4") = arg3; \ 476 + register type4 __arg4 asm("5") = arg4; \ 477 + register type5 __arg5 asm("6") = arg5; \ 478 + register long __svcres asm("2"); \ 479 + long __res; \ 480 + asm volatile( \ 481 + " .if %1 < 256\n" \ 482 + " svc %b1\n" \ 483 + " .else\n" \ 484 + " la %%r1,%1\n" \ 485 + " svc 0\n" \ 486 + " .endif" \ 487 + : "=d" (__svcres) \ 488 + : "i" (__NR_##name), \ 489 + "0" (__arg1), \ 490 + "d" (__arg2), \ 491 + "d" (__arg3), \ 492 + "d" (__arg4), \ 493 + "d" (__arg5) \ 494 + : _svc_clobber); \ 495 + __res = __svcres; \ 496 + __syscall_return(type,__res); \ 497 497 } 498 498 499 499 #define __ARCH_WANT_IPC_PARSE_VERSION
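Usage note: each of these macros expands to a complete user-space syscall stub around the svc instruction. A sketch, assuming __NR_gettid is defined by this header:

	_syscall0(long, gettid)		/* defines: long gettid(void) */

	/* ...after which it can be called like any C function:
	 *	long tid = gettid();
	 */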