Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] use inline assembly constraints available with gcc 3.3.3

Drop support to compile the kernel with gcc versions older than 3.3.3.
This allows us to use the "Q" inline assembly constraint on some more
inline assemblies without duplicating a lot of complex code (e.g. __xchg
and __cmpxchg). The distinction for older gcc versions can be removed
which saves a few lines and simplifies the code.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by

Martin Schwidefsky and committed by
Martin Schwidefsky
987bcdac d1bf8590

+226 -388
+11 -75
arch/s390/include/asm/atomic.h
··· 18 18 19 19 #define ATOMIC_INIT(i) { (i) } 20 20 21 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 22 - 23 21 #define __CS_LOOP(ptr, op_val, op_string) ({ \ 24 22 int old_val, new_val; \ 25 23 asm volatile( \ ··· 32 34 : "cc", "memory"); \ 33 35 new_val; \ 34 36 }) 35 - 36 - #else /* __GNUC__ */ 37 - 38 - #define __CS_LOOP(ptr, op_val, op_string) ({ \ 39 - int old_val, new_val; \ 40 - asm volatile( \ 41 - " l %0,0(%3)\n" \ 42 - "0: lr %1,%0\n" \ 43 - op_string " %1,%4\n" \ 44 - " cs %0,%1,0(%3)\n" \ 45 - " jl 0b" \ 46 - : "=&d" (old_val), "=&d" (new_val), \ 47 - "=m" (((atomic_t *)(ptr))->counter) \ 48 - : "a" (ptr), "d" (op_val), \ 49 - "m" (((atomic_t *)(ptr))->counter) \ 50 - : "cc", "memory"); \ 51 - new_val; \ 52 - }) 53 - 54 - #endif /* __GNUC__ */ 55 37 56 38 static inline int atomic_read(const atomic_t *v) 57 39 { ··· 79 101 80 102 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 81 103 { 82 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 83 104 asm volatile( 84 105 " cs %0,%2,%1" 85 106 : "+d" (old), "=Q" (v->counter) 86 107 : "d" (new), "Q" (v->counter) 87 108 : "cc", "memory"); 88 - #else /* __GNUC__ */ 89 - asm volatile( 90 - " cs %0,%3,0(%2)" 91 - : "+d" (old), "=m" (v->counter) 92 - : "a" (v), "d" (new), "m" (v->counter) 93 - : "cc", "memory"); 94 - #endif /* __GNUC__ */ 95 109 return old; 96 110 } 97 111 ··· 110 140 111 141 #ifdef CONFIG_64BIT 112 142 113 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 114 - 115 143 #define __CSG_LOOP(ptr, op_val, op_string) ({ \ 116 144 long long old_val, new_val; \ 117 145 asm volatile( \ ··· 124 156 : "cc", "memory"); \ 125 157 new_val; \ 126 158 }) 127 - 128 - #else /* __GNUC__ */ 129 - 130 - #define __CSG_LOOP(ptr, op_val, op_string) ({ \ 131 - long long old_val, new_val; \ 132 - asm volatile( \ 133 - " lg %0,0(%3)\n" \ 134 - "0: lgr %1,%0\n" \ 135 - op_string " %1,%4\n" \ 136 - " csg %0,%1,0(%3)\n" \ 137 - " jl 0b" \ 138 - : "=&d" (old_val), "=&d" 
(new_val), \ 139 - "=m" (((atomic_t *)(ptr))->counter) \ 140 - : "a" (ptr), "d" (op_val), \ 141 - "m" (((atomic_t *)(ptr))->counter) \ 142 - : "cc", "memory"); \ 143 - new_val; \ 144 - }) 145 - 146 - #endif /* __GNUC__ */ 147 159 148 160 static inline long long atomic64_read(const atomic64_t *v) 149 161 { ··· 162 214 static inline long long atomic64_cmpxchg(atomic64_t *v, 163 215 long long old, long long new) 164 216 { 165 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 166 217 asm volatile( 167 218 " csg %0,%2,%1" 168 219 : "+d" (old), "=Q" (v->counter) 169 220 : "d" (new), "Q" (v->counter) 170 221 : "cc", "memory"); 171 - #else /* __GNUC__ */ 172 - asm volatile( 173 - " csg %0,%3,0(%2)" 174 - : "+d" (old), "=m" (v->counter) 175 - : "a" (v), "d" (new), "m" (v->counter) 176 - : "cc", "memory"); 177 - #endif /* __GNUC__ */ 178 222 return old; 179 223 } 180 224 ··· 183 243 register_pair rp; 184 244 185 245 asm volatile( 186 - " lm %0,%N0,0(%1)" 187 - : "=&d" (rp) 188 - : "a" (&v->counter), "m" (v->counter) 189 - ); 246 + " lm %0,%N0,%1" 247 + : "=&d" (rp) : "Q" (v->counter) ); 190 248 return rp.pair; 191 249 } 192 250 ··· 193 255 register_pair rp = {.pair = i}; 194 256 195 257 asm volatile( 196 - " stm %1,%N1,0(%2)" 197 - : "=m" (v->counter) 198 - : "d" (rp), "a" (&v->counter) 199 - ); 258 + " stm %1,%N1,%0" 259 + : "=Q" (v->counter) : "d" (rp) ); 200 260 } 201 261 202 262 static inline long long atomic64_xchg(atomic64_t *v, long long new) ··· 203 267 register_pair rp_old; 204 268 205 269 asm volatile( 206 - " lm %0,%N0,0(%2)\n" 207 - "0: cds %0,%3,0(%2)\n" 270 + " lm %0,%N0,%1\n" 271 + "0: cds %0,%2,%1\n" 208 272 " jl 0b\n" 209 - : "=&d" (rp_old), "+m" (v->counter) 210 - : "a" (&v->counter), "d" (rp_new) 273 + : "=&d" (rp_old), "=Q" (v->counter) 274 + : "d" (rp_new), "Q" (v->counter) 211 275 : "cc"); 212 276 return rp_old.pair; 213 277 } ··· 219 283 register_pair rp_new = {.pair = new}; 220 284 221 285 asm volatile( 222 - " cds %0,%3,0(%2)" 223 - : "+&d" 
(rp_old), "+m" (v->counter) 224 - : "a" (&v->counter), "d" (rp_new) 286 + " cds %0,%2,%1" 287 + : "+&d" (rp_old), "=Q" (v->counter) 288 + : "d" (rp_new), "Q" (v->counter) 225 289 : "cc"); 226 290 return rp_old.pair; 227 291 }
+20 -63
arch/s390/include/asm/bitops.h
··· 71 71 #define __BITOPS_AND "nr" 72 72 #define __BITOPS_XOR "xr" 73 73 74 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 75 - 76 74 #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 77 75 asm volatile( \ 78 76 " l %0,%2\n" \ ··· 83 85 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 84 86 : "cc"); 85 87 86 - #else /* __GNUC__ */ 87 - 88 - #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 89 - asm volatile( \ 90 - " l %0,0(%4)\n" \ 91 - "0: lr %1,%0\n" \ 92 - __op_string " %1,%3\n" \ 93 - " cs %0,%1,0(%4)\n" \ 94 - " jl 0b" \ 95 - : "=&d" (__old), "=&d" (__new), \ 96 - "=m" (*(unsigned long *) __addr) \ 97 - : "d" (__val), "a" (__addr), \ 98 - "m" (*(unsigned long *) __addr) : "cc"); 99 - 100 - #endif /* __GNUC__ */ 101 - 102 88 #else /* __s390x__ */ 103 89 104 90 #define __BITOPS_ALIGN 7 ··· 90 108 #define __BITOPS_OR "ogr" 91 109 #define __BITOPS_AND "ngr" 92 110 #define __BITOPS_XOR "xgr" 93 - 94 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 95 111 96 112 #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 97 113 asm volatile( \ ··· 102 122 "=Q" (*(unsigned long *) __addr) \ 103 123 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 104 124 : "cc"); 105 - 106 - #else /* __GNUC__ */ 107 - 108 - #define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \ 109 - asm volatile( \ 110 - " lg %0,0(%4)\n" \ 111 - "0: lgr %1,%0\n" \ 112 - __op_string " %1,%3\n" \ 113 - " csg %0,%1,0(%4)\n" \ 114 - " jl 0b" \ 115 - : "=&d" (__old), "=&d" (__new), \ 116 - "=m" (*(unsigned long *) __addr) \ 117 - : "d" (__val), "a" (__addr), \ 118 - "m" (*(unsigned long *) __addr) : "cc"); 119 - 120 - 121 - #endif /* __GNUC__ */ 122 125 123 126 #endif /* __s390x__ */ 124 127 ··· 224 261 225 262 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 226 263 asm volatile( 227 - " oc 0(1,%1),0(%2)" 228 - : "=m" (*(char *) addr) : "a" (addr), 229 - "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" 
); 264 + " oc %O0(1,%R0),%1" 265 + : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 230 266 } 231 267 232 268 static inline void ··· 252 290 253 291 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 254 292 asm volatile( 255 - " nc 0(1,%1),0(%2)" 256 - : "=m" (*(char *) addr) : "a" (addr), 257 - "a" (_ni_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc"); 293 + " nc %O0(1,%R0),%1" 294 + : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); 258 295 } 259 296 260 297 static inline void ··· 279 318 280 319 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 281 320 asm volatile( 282 - " xc 0(1,%1),0(%2)" 283 - : "=m" (*(char *) addr) : "a" (addr), 284 - "a" (_oi_bitmap + (nr & 7)), "m" (*(char *) addr) : "cc" ); 321 + " xc %O0(1,%R0),%1" 322 + : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 285 323 } 286 324 287 325 static inline void ··· 309 349 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 310 350 ch = *(unsigned char *) addr; 311 351 asm volatile( 312 - " oc 0(1,%1),0(%2)" 313 - : "=m" (*(char *) addr) 314 - : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 315 - "m" (*(char *) addr) : "cc", "memory"); 352 + " oc %O0(1,%R0),%1" 353 + : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) 354 + : "cc", "memory"); 316 355 return (ch >> (nr & 7)) & 1; 317 356 } 318 357 #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y) ··· 328 369 addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3); 329 370 ch = *(unsigned char *) addr; 330 371 asm volatile( 331 - " nc 0(1,%1),0(%2)" 332 - : "=m" (*(char *) addr) 333 - : "a" (addr), "a" (_ni_bitmap + (nr & 7)), 334 - "m" (*(char *) addr) : "cc", "memory"); 372 + " nc %O0(1,%R0),%1" 373 + : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) 374 + : "cc", "memory"); 335 375 return (ch >> (nr & 7)) & 1; 336 376 } 337 377 #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y) ··· 347 389 addr = (unsigned long) ptr + ((nr ^ 
(__BITOPS_WORDSIZE - 8)) >> 3); 348 390 ch = *(unsigned char *) addr; 349 391 asm volatile( 350 - " xc 0(1,%1),0(%2)" 351 - : "=m" (*(char *) addr) 352 - : "a" (addr), "a" (_oi_bitmap + (nr & 7)), 353 - "m" (*(char *) addr) : "cc", "memory"); 392 + " xc %O0(1,%R0),%1" 393 + : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) 394 + : "cc", "memory"); 354 395 return (ch >> (nr & 7)) & 1; 355 396 } 356 397 #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y) ··· 548 591 p = (unsigned long *)((unsigned long) p + offset); 549 592 #ifndef __s390x__ 550 593 asm volatile( 551 - " ic %0,0(%1)\n" 552 - " icm %0,2,1(%1)\n" 553 - " icm %0,4,2(%1)\n" 554 - " icm %0,8,3(%1)" 555 - : "=&d" (word) : "a" (p), "m" (*p) : "cc"); 594 + " ic %0,%O1(%R1)\n" 595 + " icm %0,2,%O1+1(%R1)\n" 596 + " icm %0,4,%O1+2(%R1)\n" 597 + " icm %0,8,%O1+3(%R1)" 598 + : "=&d" (word) : "Q" (*p) : "cc"); 556 599 #else 557 600 asm volatile( 558 601 " lrvg %0,%1"
+6 -6
arch/s390/include/asm/etr.h
··· 145 145 int rc = -ENOSYS; 146 146 147 147 asm volatile( 148 - " .insn s,0xb2160000,0(%2)\n" 148 + " .insn s,0xb2160000,%1\n" 149 149 "0: la %0,0\n" 150 150 "1:\n" 151 151 EX_TABLE(0b,1b) 152 - : "+d" (rc) : "m" (*ctrl), "a" (ctrl)); 152 + : "+d" (rc) : "Q" (*ctrl)); 153 153 return rc; 154 154 } 155 155 ··· 159 159 int rc = -ENOSYS; 160 160 161 161 asm volatile( 162 - " .insn s,0xb2170000,0(%2)\n" 162 + " .insn s,0xb2170000,%1\n" 163 163 "0: la %0,0\n" 164 164 "1:\n" 165 165 EX_TABLE(0b,1b) 166 - : "+d" (rc) : "m" (*aib), "a" (aib)); 166 + : "+d" (rc) : "Q" (*aib)); 167 167 return rc; 168 168 } 169 169 ··· 174 174 int rc = -ENOSYS; 175 175 176 176 asm volatile( 177 - " .insn s,0xb2b30000,0(%2)\n" 177 + " .insn s,0xb2b30000,%1\n" 178 178 "0: la %0,0\n" 179 179 "1:\n" 180 180 EX_TABLE(0b,1b) 181 - : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0)); 181 + : "+d" (rc) : "Q" (*aib), "d" (reg0)); 182 182 return rc; 183 183 } 184 184
-36
arch/s390/include/asm/irqflags.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 12 - 13 11 /* store then or system mask. */ 14 12 #define __raw_local_irq_stosm(__or) \ 15 13 ({ \ ··· 33 35 ({ \ 34 36 asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ 35 37 }) 36 - 37 - #else /* __GNUC__ */ 38 - 39 - /* store then or system mask. */ 40 - #define __raw_local_irq_stosm(__or) \ 41 - ({ \ 42 - unsigned long __mask; \ 43 - asm volatile( \ 44 - " stosm 0(%1),%2" \ 45 - : "=m" (__mask) \ 46 - : "a" (&__mask), "i" (__or) : "memory"); \ 47 - __mask; \ 48 - }) 49 - 50 - /* store then and system mask. */ 51 - #define __raw_local_irq_stnsm(__and) \ 52 - ({ \ 53 - unsigned long __mask; \ 54 - asm volatile( \ 55 - " stnsm 0(%1),%2" \ 56 - : "=m" (__mask) \ 57 - : "a" (&__mask), "i" (__and) : "memory"); \ 58 - __mask; \ 59 - }) 60 - 61 - /* set system mask. */ 62 - #define __raw_local_irq_ssm(__mask) \ 63 - ({ \ 64 - asm volatile( \ 65 - " ssm 0(%0)" \ 66 - : : "a" (&__mask), "m" (__mask) : "memory"); \ 67 - }) 68 - 69 - #endif /* __GNUC__ */ 70 38 71 39 /* interrupt control.. */ 72 40 static inline unsigned long raw_local_irq_enable(void)
+9 -9
arch/s390/include/asm/processor.h
··· 28 28 29 29 static inline void get_cpu_id(struct cpuid *ptr) 30 30 { 31 - asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); 31 + asm volatile("stidp %0" : "=Q" (*ptr)); 32 32 } 33 33 34 34 extern void s390_adjust_jiffies(void); ··· 184 184 static inline void __load_psw(psw_t psw) 185 185 { 186 186 #ifndef __s390x__ 187 - asm volatile("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); 187 + asm volatile("lpsw %0" : : "Q" (psw) : "cc"); 188 188 #else 189 - asm volatile("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc"); 189 + asm volatile("lpswe %0" : : "Q" (psw) : "cc"); 190 190 #endif 191 191 } 192 192 ··· 206 206 asm volatile( 207 207 " basr %0,0\n" 208 208 "0: ahi %0,1f-0b\n" 209 - " st %0,4(%1)\n" 210 - " lpsw 0(%1)\n" 209 + " st %0,%O1+4(%R1)\n" 210 + " lpsw %1\n" 211 211 "1:" 212 - : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); 212 + : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); 213 213 #else /* __s390x__ */ 214 214 asm volatile( 215 215 " larl %0,1f\n" 216 - " stg %0,8(%1)\n" 217 - " lpswe 0(%1)\n" 216 + " stg %0,%O1+8(%R1)\n" 217 + " lpswe %1\n" 218 218 "1:" 219 - : "=&d" (addr) : "a" (&psw), "m" (psw) : "memory", "cc"); 219 + : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); 220 220 #endif /* __s390x__ */ 221 221 } 222 222
+73 -74
arch/s390/include/asm/rwsem.h
··· 124 124 125 125 asm volatile( 126 126 #ifndef __s390x__ 127 - " l %0,0(%3)\n" 127 + " l %0,%2\n" 128 128 "0: lr %1,%0\n" 129 - " ahi %1,%5\n" 130 - " cs %0,%1,0(%3)\n" 129 + " ahi %1,%4\n" 130 + " cs %0,%1,%2\n" 131 131 " jl 0b" 132 132 #else /* __s390x__ */ 133 - " lg %0,0(%3)\n" 133 + " lg %0,%2\n" 134 134 "0: lgr %1,%0\n" 135 - " aghi %1,%5\n" 136 - " csg %0,%1,0(%3)\n" 135 + " aghi %1,%4\n" 136 + " csg %0,%1,%2\n" 137 137 " jl 0b" 138 138 #endif /* __s390x__ */ 139 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 140 - : "a" (&sem->count), "m" (sem->count), 141 - "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); 139 + : "=&d" (old), "=&d" (new), "=Q" (sem->count) 140 + : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 141 + : "cc", "memory"); 142 142 if (old < 0) 143 143 rwsem_down_read_failed(sem); 144 144 } ··· 152 152 153 153 asm volatile( 154 154 #ifndef __s390x__ 155 - " l %0,0(%3)\n" 155 + " l %0,%2\n" 156 156 "0: ltr %1,%0\n" 157 157 " jm 1f\n" 158 - " ahi %1,%5\n" 159 - " cs %0,%1,0(%3)\n" 158 + " ahi %1,%4\n" 159 + " cs %0,%1,%2\n" 160 160 " jl 0b\n" 161 161 "1:" 162 162 #else /* __s390x__ */ 163 - " lg %0,0(%3)\n" 163 + " lg %0,%2\n" 164 164 "0: ltgr %1,%0\n" 165 165 " jm 1f\n" 166 - " aghi %1,%5\n" 167 - " csg %0,%1,0(%3)\n" 166 + " aghi %1,%4\n" 167 + " csg %0,%1,%2\n" 168 168 " jl 0b\n" 169 169 "1:" 170 170 #endif /* __s390x__ */ 171 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 172 - : "a" (&sem->count), "m" (sem->count), 173 - "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory"); 171 + : "=&d" (old), "=&d" (new), "=Q" (sem->count) 172 + : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 173 + : "cc", "memory"); 174 174 return old >= 0 ? 
1 : 0; 175 175 } 176 176 ··· 184 184 tmp = RWSEM_ACTIVE_WRITE_BIAS; 185 185 asm volatile( 186 186 #ifndef __s390x__ 187 - " l %0,0(%3)\n" 187 + " l %0,%2\n" 188 188 "0: lr %1,%0\n" 189 - " a %1,%5\n" 190 - " cs %0,%1,0(%3)\n" 189 + " a %1,%4\n" 190 + " cs %0,%1,%2\n" 191 191 " jl 0b" 192 192 #else /* __s390x__ */ 193 - " lg %0,0(%3)\n" 193 + " lg %0,%2\n" 194 194 "0: lgr %1,%0\n" 195 - " ag %1,%5\n" 196 - " csg %0,%1,0(%3)\n" 195 + " ag %1,%4\n" 196 + " csg %0,%1,%2\n" 197 197 " jl 0b" 198 198 #endif /* __s390x__ */ 199 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 200 - : "a" (&sem->count), "m" (sem->count), "m" (tmp) 199 + : "=&d" (old), "=&d" (new), "=Q" (sem->count) 200 + : "Q" (sem->count), "m" (tmp) 201 201 : "cc", "memory"); 202 202 if (old != 0) 203 203 rwsem_down_write_failed(sem); ··· 217 217 218 218 asm volatile( 219 219 #ifndef __s390x__ 220 - " l %0,0(%2)\n" 220 + " l %0,%1\n" 221 221 "0: ltr %0,%0\n" 222 222 " jnz 1f\n" 223 - " cs %0,%4,0(%2)\n" 223 + " cs %0,%3,%1\n" 224 224 " jl 0b\n" 225 225 #else /* __s390x__ */ 226 - " lg %0,0(%2)\n" 226 + " lg %0,%1\n" 227 227 "0: ltgr %0,%0\n" 228 228 " jnz 1f\n" 229 - " csg %0,%4,0(%2)\n" 229 + " csg %0,%3,%1\n" 230 230 " jl 0b\n" 231 231 #endif /* __s390x__ */ 232 232 "1:" 233 - : "=&d" (old), "=m" (sem->count) 234 - : "a" (&sem->count), "m" (sem->count), 235 - "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory"); 233 + : "=&d" (old), "=Q" (sem->count) 234 + : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) 235 + : "cc", "memory"); 236 236 return (old == RWSEM_UNLOCKED_VALUE) ? 
1 : 0; 237 237 } 238 238 ··· 245 245 246 246 asm volatile( 247 247 #ifndef __s390x__ 248 - " l %0,0(%3)\n" 248 + " l %0,%2\n" 249 249 "0: lr %1,%0\n" 250 - " ahi %1,%5\n" 251 - " cs %0,%1,0(%3)\n" 250 + " ahi %1,%4\n" 251 + " cs %0,%1,%2\n" 252 252 " jl 0b" 253 253 #else /* __s390x__ */ 254 - " lg %0,0(%3)\n" 254 + " lg %0,%2\n" 255 255 "0: lgr %1,%0\n" 256 - " aghi %1,%5\n" 257 - " csg %0,%1,0(%3)\n" 256 + " aghi %1,%4\n" 257 + " csg %0,%1,%2\n" 258 258 " jl 0b" 259 259 #endif /* __s390x__ */ 260 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 261 - : "a" (&sem->count), "m" (sem->count), 262 - "i" (-RWSEM_ACTIVE_READ_BIAS) 260 + : "=&d" (old), "=&d" (new), "=Q" (sem->count) 261 + : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) 263 262 : "cc", "memory"); 264 263 if (new < 0) 265 264 if ((new & RWSEM_ACTIVE_MASK) == 0) ··· 275 276 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 276 277 asm volatile( 277 278 #ifndef __s390x__ 278 - " l %0,0(%3)\n" 279 + " l %0,%2\n" 279 280 "0: lr %1,%0\n" 280 - " a %1,%5\n" 281 - " cs %0,%1,0(%3)\n" 281 + " a %1,%4\n" 282 + " cs %0,%1,%2\n" 282 283 " jl 0b" 283 284 #else /* __s390x__ */ 284 - " lg %0,0(%3)\n" 285 + " lg %0,%2\n" 285 286 "0: lgr %1,%0\n" 286 - " ag %1,%5\n" 287 - " csg %0,%1,0(%3)\n" 287 + " ag %1,%4\n" 288 + " csg %0,%1,%2\n" 288 289 " jl 0b" 289 290 #endif /* __s390x__ */ 290 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 291 - : "a" (&sem->count), "m" (sem->count), "m" (tmp) 291 + : "=&d" (old), "=&d" (new), "=Q" (sem->count) 292 + : "Q" (sem->count), "m" (tmp) 292 293 : "cc", "memory"); 293 294 if (new < 0) 294 295 if ((new & RWSEM_ACTIVE_MASK) == 0) ··· 305 306 tmp = -RWSEM_WAITING_BIAS; 306 307 asm volatile( 307 308 #ifndef __s390x__ 308 - " l %0,0(%3)\n" 309 + " l %0,%2\n" 309 310 "0: lr %1,%0\n" 310 - " a %1,%5\n" 311 - " cs %0,%1,0(%3)\n" 311 + " a %1,%4\n" 312 + " cs %0,%1,%2\n" 312 313 " jl 0b" 313 314 #else /* __s390x__ */ 314 - " lg %0,0(%3)\n" 315 + " lg %0,%2\n" 315 316 "0: lgr %1,%0\n" 316 - " ag %1,%5\n" 317 - " 
csg %0,%1,0(%3)\n" 317 + " ag %1,%4\n" 318 + " csg %0,%1,%2\n" 318 319 " jl 0b" 319 320 #endif /* __s390x__ */ 320 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 321 - : "a" (&sem->count), "m" (sem->count), "m" (tmp) 321 + : "=&d" (old), "=&d" (new), "=Q" (sem->count) 322 + : "Q" (sem->count), "m" (tmp) 322 323 : "cc", "memory"); 323 324 if (new > 1) 324 325 rwsem_downgrade_wake(sem); ··· 333 334 334 335 asm volatile( 335 336 #ifndef __s390x__ 336 - " l %0,0(%3)\n" 337 + " l %0,%2\n" 337 338 "0: lr %1,%0\n" 338 - " ar %1,%5\n" 339 - " cs %0,%1,0(%3)\n" 339 + " ar %1,%4\n" 340 + " cs %0,%1,%2\n" 340 341 " jl 0b" 341 342 #else /* __s390x__ */ 342 - " lg %0,0(%3)\n" 343 + " lg %0,%2\n" 343 344 "0: lgr %1,%0\n" 344 - " agr %1,%5\n" 345 - " csg %0,%1,0(%3)\n" 345 + " agr %1,%4\n" 346 + " csg %0,%1,%2\n" 346 347 " jl 0b" 347 348 #endif /* __s390x__ */ 348 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 349 - : "a" (&sem->count), "m" (sem->count), "d" (delta) 349 + : "=&d" (old), "=&d" (new), "=Q" (sem->count) 350 + : "Q" (sem->count), "d" (delta) 350 351 : "cc", "memory"); 351 352 } 352 353 ··· 359 360 360 361 asm volatile( 361 362 #ifndef __s390x__ 362 - " l %0,0(%3)\n" 363 + " l %0,%2\n" 363 364 "0: lr %1,%0\n" 364 - " ar %1,%5\n" 365 - " cs %0,%1,0(%3)\n" 365 + " ar %1,%4\n" 366 + " cs %0,%1,%2\n" 366 367 " jl 0b" 367 368 #else /* __s390x__ */ 368 - " lg %0,0(%3)\n" 369 + " lg %0,%2\n" 369 370 "0: lgr %1,%0\n" 370 - " agr %1,%5\n" 371 - " csg %0,%1,0(%3)\n" 371 + " agr %1,%4\n" 372 + " csg %0,%1,%2\n" 372 373 " jl 0b" 373 374 #endif /* __s390x__ */ 374 - : "=&d" (old), "=&d" (new), "=m" (sem->count) 375 - : "a" (&sem->count), "m" (sem->count), "d" (delta) 375 + : "=&d" (old), "=&d" (new), "=Q" (sem->count) 376 + : "Q" (sem->count), "d" (delta) 376 377 : "cc", "memory"); 377 378 return new; 378 379 }
-18
arch/s390/include/asm/spinlock.h
··· 13 13 14 14 #include <linux/smp.h> 15 15 16 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 17 - 18 16 static inline int 19 17 _raw_compare_and_swap(volatile unsigned int *lock, 20 18 unsigned int old, unsigned int new) ··· 24 26 : "cc", "memory" ); 25 27 return old; 26 28 } 27 - 28 - #else /* __GNUC__ */ 29 - 30 - static inline int 31 - _raw_compare_and_swap(volatile unsigned int *lock, 32 - unsigned int old, unsigned int new) 33 - { 34 - asm volatile( 35 - " cs %0,%3,0(%4)" 36 - : "=d" (old), "=m" (*lock) 37 - : "0" (old), "d" (new), "a" (lock), "m" (*lock) 38 - : "cc", "memory" ); 39 - return old; 40 - } 41 - 42 - #endif /* __GNUC__ */ 43 29 44 30 /* 45 31 * Simple spin lock operations. There are two variants, one clears IRQ's
+8 -8
arch/s390/include/asm/swab.h
··· 47 47 48 48 asm volatile( 49 49 #ifndef __s390x__ 50 - " icm %0,8,3(%1)\n" 51 - " icm %0,4,2(%1)\n" 52 - " icm %0,2,1(%1)\n" 53 - " ic %0,0(%1)" 54 - : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 50 + " icm %0,8,%O1+3(%R1)\n" 51 + " icm %0,4,%O1+2(%R1)\n" 52 + " icm %0,2,%O1+1(%R1)\n" 53 + " ic %0,%1" 54 + : "=&d" (result) : "Q" (*x) : "cc"); 55 55 #else /* __s390x__ */ 56 56 " lrv %0,%1" 57 57 : "=d" (result) : "m" (*x)); ··· 77 77 78 78 asm volatile( 79 79 #ifndef __s390x__ 80 - " icm %0,2,1(%1)\n" 81 - " ic %0,0(%1)\n" 82 - : "=&d" (result) : "a" (x), "m" (*x) : "cc"); 80 + " icm %0,2,%O1+1(%R1)\n" 81 + " ic %0,%1\n" 82 + : "=&d" (result) : "Q" (*x) : "cc"); 83 83 #else /* __s390x__ */ 84 84 " lrvh %0,%1" 85 85 : "=d" (result) : "m" (*x));
+85 -83
arch/s390/include/asm/system.h
··· 24 24 static inline void save_fp_regs(s390_fp_regs *fpregs) 25 25 { 26 26 asm volatile( 27 - " std 0,8(%1)\n" 28 - " std 2,24(%1)\n" 29 - " std 4,40(%1)\n" 30 - " std 6,56(%1)" 31 - : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 27 + " std 0,%O0+8(%R0)\n" 28 + " std 2,%O0+24(%R0)\n" 29 + " std 4,%O0+40(%R0)\n" 30 + " std 6,%O0+56(%R0)" 31 + : "=Q" (*fpregs) : "Q" (*fpregs)); 32 32 if (!MACHINE_HAS_IEEE) 33 33 return; 34 34 asm volatile( 35 - " stfpc 0(%1)\n" 36 - " std 1,16(%1)\n" 37 - " std 3,32(%1)\n" 38 - " std 5,48(%1)\n" 39 - " std 7,64(%1)\n" 40 - " std 8,72(%1)\n" 41 - " std 9,80(%1)\n" 42 - " std 10,88(%1)\n" 43 - " std 11,96(%1)\n" 44 - " std 12,104(%1)\n" 45 - " std 13,112(%1)\n" 46 - " std 14,120(%1)\n" 47 - " std 15,128(%1)\n" 48 - : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory"); 35 + " stfpc %0\n" 36 + " std 1,%O0+16(%R0)\n" 37 + " std 3,%O0+32(%R0)\n" 38 + " std 5,%O0+48(%R0)\n" 39 + " std 7,%O0+64(%R0)\n" 40 + " std 8,%O0+72(%R0)\n" 41 + " std 9,%O0+80(%R0)\n" 42 + " std 10,%O0+88(%R0)\n" 43 + " std 11,%O0+96(%R0)\n" 44 + " std 12,%O0+104(%R0)\n" 45 + " std 13,%O0+112(%R0)\n" 46 + " std 14,%O0+120(%R0)\n" 47 + " std 15,%O0+128(%R0)\n" 48 + : "=Q" (*fpregs) : "Q" (*fpregs)); 49 49 } 50 50 51 51 static inline void restore_fp_regs(s390_fp_regs *fpregs) 52 52 { 53 53 asm volatile( 54 - " ld 0,8(%0)\n" 55 - " ld 2,24(%0)\n" 56 - " ld 4,40(%0)\n" 57 - " ld 6,56(%0)" 58 - : : "a" (fpregs), "m" (*fpregs)); 54 + " ld 0,%O0+8(%R0)\n" 55 + " ld 2,%O0+24(%R0)\n" 56 + " ld 4,%O0+40(%R0)\n" 57 + " ld 6,%O0+56(%R0)" 58 + : : "Q" (*fpregs)); 59 59 if (!MACHINE_HAS_IEEE) 60 60 return; 61 61 asm volatile( 62 - " lfpc 0(%0)\n" 63 - " ld 1,16(%0)\n" 64 - " ld 3,32(%0)\n" 65 - " ld 5,48(%0)\n" 66 - " ld 7,64(%0)\n" 67 - " ld 8,72(%0)\n" 68 - " ld 9,80(%0)\n" 69 - " ld 10,88(%0)\n" 70 - " ld 11,96(%0)\n" 71 - " ld 12,104(%0)\n" 72 - " ld 13,112(%0)\n" 73 - " ld 14,120(%0)\n" 74 - " ld 15,128(%0)\n" 75 - : : "a" (fpregs), "m" (*fpregs)); 62 + " 
lfpc %0\n" 63 + " ld 1,%O0+16(%R0)\n" 64 + " ld 3,%O0+32(%R0)\n" 65 + " ld 5,%O0+48(%R0)\n" 66 + " ld 7,%O0+64(%R0)\n" 67 + " ld 8,%O0+72(%R0)\n" 68 + " ld 9,%O0+80(%R0)\n" 69 + " ld 10,%O0+88(%R0)\n" 70 + " ld 11,%O0+96(%R0)\n" 71 + " ld 12,%O0+104(%R0)\n" 72 + " ld 13,%O0+112(%R0)\n" 73 + " ld 14,%O0+120(%R0)\n" 74 + " ld 15,%O0+128(%R0)\n" 75 + : : "Q" (*fpregs)); 76 76 } 77 77 78 78 static inline void save_access_regs(unsigned int *acrs) 79 79 { 80 - asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory"); 80 + asm volatile("stam 0,15,%0" : "=Q" (*acrs)); 81 81 } 82 82 83 83 static inline void restore_access_regs(unsigned int *acrs) 84 84 { 85 - asm volatile("lam 0,15,0(%0)" : : "a" (acrs)); 85 + asm volatile("lam 0,15,%0" : : "Q" (*acrs)); 86 86 } 87 87 88 88 #define switch_to(prev,next,last) do { \ ··· 139 139 shift = (3 ^ (addr & 3)) << 3; 140 140 addr ^= addr & 3; 141 141 asm volatile( 142 - " l %0,0(%4)\n" 142 + " l %0,%4\n" 143 143 "0: lr 0,%0\n" 144 144 " nr 0,%3\n" 145 145 " or 0,%2\n" 146 - " cs %0,0,0(%4)\n" 146 + " cs %0,0,%4\n" 147 147 " jl 0b\n" 148 - : "=&d" (old), "=m" (*(int *) addr) 149 - : "d" (x << shift), "d" (~(255 << shift)), "a" (addr), 150 - "m" (*(int *) addr) : "memory", "cc", "0"); 148 + : "=&d" (old), "=Q" (*(int *) addr) 149 + : "d" (x << shift), "d" (~(255 << shift)), 150 + "Q" (*(int *) addr) : "memory", "cc", "0"); 151 151 return old >> shift; 152 152 case 2: 153 153 addr = (unsigned long) ptr; 154 154 shift = (2 ^ (addr & 2)) << 3; 155 155 addr ^= addr & 2; 156 156 asm volatile( 157 - " l %0,0(%4)\n" 157 + " l %0,%4\n" 158 158 "0: lr 0,%0\n" 159 159 " nr 0,%3\n" 160 160 " or 0,%2\n" 161 - " cs %0,0,0(%4)\n" 161 + " cs %0,0,%4\n" 162 162 " jl 0b\n" 163 - : "=&d" (old), "=m" (*(int *) addr) 164 - : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr), 165 - "m" (*(int *) addr) : "memory", "cc", "0"); 163 + : "=&d" (old), "=Q" (*(int *) addr) 164 + : "d" (x << shift), "d" (~(65535 << shift)), 165 + "Q" (*(int *) addr) : 
"memory", "cc", "0"); 166 166 return old >> shift; 167 167 case 4: 168 168 asm volatile( 169 - " l %0,0(%3)\n" 170 - "0: cs %0,%2,0(%3)\n" 169 + " l %0,%3\n" 170 + "0: cs %0,%2,%3\n" 171 171 " jl 0b\n" 172 - : "=&d" (old), "=m" (*(int *) ptr) 173 - : "d" (x), "a" (ptr), "m" (*(int *) ptr) 172 + : "=&d" (old), "=Q" (*(int *) ptr) 173 + : "d" (x), "Q" (*(int *) ptr) 174 174 : "memory", "cc"); 175 175 return old; 176 176 #ifdef __s390x__ 177 177 case 8: 178 178 asm volatile( 179 - " lg %0,0(%3)\n" 180 - "0: csg %0,%2,0(%3)\n" 179 + " lg %0,%3\n" 180 + "0: csg %0,%2,%3\n" 181 181 " jl 0b\n" 182 182 : "=&d" (old), "=m" (*(long *) ptr) 183 - : "d" (x), "a" (ptr), "m" (*(long *) ptr) 183 + : "d" (x), "Q" (*(long *) ptr) 184 184 : "memory", "cc"); 185 185 return old; 186 186 #endif /* __s390x__ */ ··· 215 215 shift = (3 ^ (addr & 3)) << 3; 216 216 addr ^= addr & 3; 217 217 asm volatile( 218 - " l %0,0(%4)\n" 218 + " l %0,%2\n" 219 219 "0: nr %0,%5\n" 220 220 " lr %1,%0\n" 221 221 " or %0,%2\n" 222 222 " or %1,%3\n" 223 - " cs %0,%1,0(%4)\n" 223 + " cs %0,%1,%2\n" 224 224 " jnl 1f\n" 225 225 " xr %1,%0\n" 226 226 " nr %1,%5\n" 227 227 " jnz 0b\n" 228 228 "1:" 229 - : "=&d" (prev), "=&d" (tmp) 230 - : "d" (old << shift), "d" (new << shift), "a" (ptr), 231 - "d" (~(255 << shift)) 229 + : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) 230 + : "d" (old << shift), "d" (new << shift), 231 + "d" (~(255 << shift)), "Q" (*(int *) ptr) 232 232 : "memory", "cc"); 233 233 return prev >> shift; 234 234 case 2: ··· 236 236 shift = (2 ^ (addr & 2)) << 3; 237 237 addr ^= addr & 2; 238 238 asm volatile( 239 - " l %0,0(%4)\n" 239 + " l %0,%2\n" 240 240 "0: nr %0,%5\n" 241 241 " lr %1,%0\n" 242 242 " or %0,%2\n" 243 243 " or %1,%3\n" 244 - " cs %0,%1,0(%4)\n" 244 + " cs %0,%1,%2\n" 245 245 " jnl 1f\n" 246 246 " xr %1,%0\n" 247 247 " nr %1,%5\n" 248 248 " jnz 0b\n" 249 249 "1:" 250 - : "=&d" (prev), "=&d" (tmp) 251 - : "d" (old << shift), "d" (new << shift), "a" (ptr), 252 - "d" (~(65535 << 
shift)) 250 + : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) 251 + : "d" (old << shift), "d" (new << shift), 252 + "d" (~(65535 << shift)), "Q" (*(int *) ptr) 253 253 : "memory", "cc"); 254 254 return prev >> shift; 255 255 case 4: 256 256 asm volatile( 257 - " cs %0,%2,0(%3)\n" 258 - : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 257 + " cs %0,%3,%1\n" 258 + : "=&d" (prev), "=Q" (*(int *) ptr) 259 + : "0" (old), "d" (new), "Q" (*(int *) ptr) 259 260 : "memory", "cc"); 260 261 return prev; 261 262 #ifdef __s390x__ 262 263 case 8: 263 264 asm volatile( 264 - " csg %0,%2,0(%3)\n" 265 - : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr) 265 + " csg %0,%3,%1\n" 266 + : "=&d" (prev), "=Q" (*(long *) ptr) 267 + : "0" (old), "d" (new), "Q" (*(long *) ptr) 266 268 : "memory", "cc"); 267 269 return prev; 268 270 #endif /* __s390x__ */ ··· 304 302 #define __ctl_load(array, low, high) ({ \ 305 303 typedef struct { char _[sizeof(array)]; } addrtype; \ 306 304 asm volatile( \ 307 - " lctlg %1,%2,0(%0)\n" \ 308 - : : "a" (&array), "i" (low), "i" (high), \ 309 - "m" (*(addrtype *)(&array))); \ 305 + " lctlg %1,%2,%0\n" \ 306 + : : "Q" (*(addrtype *)(&array)), \ 307 + "i" (low), "i" (high)); \ 310 308 }) 311 309 312 310 #define __ctl_store(array, low, high) ({ \ 313 311 typedef struct { char _[sizeof(array)]; } addrtype; \ 314 312 asm volatile( \ 315 - " stctg %2,%3,0(%1)\n" \ 316 - : "=m" (*(addrtype *)(&array)) \ 317 - : "a" (&array), "i" (low), "i" (high)); \ 313 + " stctg %1,%2,%0\n" \ 314 + : "=Q" (*(addrtype *)(&array)) \ 315 + : "i" (low), "i" (high)); \ 318 316 }) 319 317 320 318 #else /* __s390x__ */ ··· 322 320 #define __ctl_load(array, low, high) ({ \ 323 321 typedef struct { char _[sizeof(array)]; } addrtype; \ 324 322 asm volatile( \ 325 - " lctl %1,%2,0(%0)\n" \ 326 - : : "a" (&array), "i" (low), "i" (high), \ 327 - "m" (*(addrtype *)(&array))); \ 323 + " lctl %1,%2,%0\n" \ 324 + : : "Q" (*(addrtype *)(&array)), \ 325 + "i" (low), "i" (high)); \ 328 326 }) 329 327 
330 328 #define __ctl_store(array, low, high) ({ \ 331 329 typedef struct { char _[sizeof(array)]; } addrtype; \ 332 330 asm volatile( \ 333 - " stctl %2,%3,0(%1)\n" \ 334 - : "=m" (*(addrtype *)(&array)) \ 335 - : "a" (&array), "i" (low), "i" (high)); \ 331 + " stctl %1,%2,%0\n" \ 332 + : "=Q" (*(addrtype *)(&array)) \ 333 + : "i" (low), "i" (high)); \ 336 334 }) 337 335 338 336 #endif /* __s390x__ */
+6 -16
arch/s390/include/asm/timex.h
··· 20 20 int cc; 21 21 22 22 asm volatile( 23 - " sck 0(%2)\n" 23 + " sck %1\n" 24 24 " ipm %0\n" 25 25 " srl %0,28\n" 26 - : "=d" (cc) : "m" (time), "a" (&time) : "cc"); 26 + : "=d" (cc) : "Q" (time) : "cc"); 27 27 return cc; 28 28 } 29 29 ··· 32 32 int cc; 33 33 34 34 asm volatile( 35 - " stck 0(%2)\n" 35 + " stck %1\n" 36 36 " ipm %0\n" 37 37 " srl %0,28\n" 38 - : "=d" (cc), "=m" (*time) : "a" (time) : "cc"); 38 + : "=d" (cc), "=Q" (*time) : : "cc"); 39 39 return cc; 40 40 } 41 41 42 42 static inline void set_clock_comparator(__u64 time) 43 43 { 44 - asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); 44 + asm volatile("sckc %0" : : "Q" (time)); 45 45 } 46 46 47 47 static inline void store_clock_comparator(__u64 *time) 48 48 { 49 - asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); 49 + asm volatile("stckc %0" : "=Q" (*time)); 50 50 } 51 51 52 52 #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ ··· 57 57 { 58 58 unsigned long long clk; 59 59 60 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 61 60 asm volatile("stck %0" : "=Q" (clk) : : "cc"); 62 - #else /* __GNUC__ */ 63 - asm volatile("stck 0(%1)" : "=m" (clk) : "a" (&clk) : "cc"); 64 - #endif /* __GNUC__ */ 65 61 return clk; 66 62 } 67 63 ··· 65 69 { 66 70 unsigned char clk[16]; 67 71 68 - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 69 72 asm volatile("stcke %0" : "=Q" (clk) : : "cc"); 70 - #else /* __GNUC__ */ 71 - asm volatile("stcke 0(%1)" : "=m" (clk) 72 - : "a" (clk) : "cc"); 73 - #endif /* __GNUC__ */ 74 - 75 73 return *((unsigned long long *)&clk[1]); 76 74 } 77 75
+8
arch/s390/kernel/asm-offsets.c
··· 9 9 #include <asm/vdso.h> 10 10 #include <asm/sigp.h> 11 11 12 + /* 13 + * Make sure that the compiler is new enough. We want a compiler that 14 + * is known to work with the "Q" assembler constraint. 15 + */ 16 + #if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3) 17 + #error Your compiler is too old; please use version 3.3.3 or newer 18 + #endif 19 + 12 20 int main(void) 13 21 { 14 22 DEFINE(__THREAD_info, offsetof(struct task_struct, stack));