Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86, asm: use bool for bitops and other assembly outputs

The gcc people have confirmed that "bool", when combined with
inline assembly, is always treated as a byte-sized operand that can be
assumed to be 0 or 1, which is exactly what the SET instruction
emits. Change the output types and intermediate variables of as many
operations as practical to "bool".

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Link: http://lkml.kernel.org/r/1465414726-197858-3-git-send-email-hpa@linux.intel.com
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>

Authored by H. Peter Anvin and committed by H. Peter Anvin
117780ee 2823d4da

+69 -66
+5 -3
arch/x86/boot/bitops.h
··· 16 16 #define BOOT_BITOPS_H 17 17 #define _LINUX_BITOPS_H /* Inhibit inclusion of <linux/bitops.h> */ 18 18 19 - static inline int constant_test_bit(int nr, const void *addr) 19 + #include <linux/types.h> 20 + 21 + static inline bool constant_test_bit(int nr, const void *addr) 20 22 { 21 23 const u32 *p = (const u32 *)addr; 22 24 return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0; 23 25 } 24 - static inline int variable_test_bit(int nr, const void *addr) 26 + static inline bool variable_test_bit(int nr, const void *addr) 25 27 { 26 - u8 v; 28 + bool v; 27 29 const u32 *p = (const u32 *)addr; 28 30 29 31 asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+4 -4
arch/x86/boot/boot.h
··· 176 176 } 177 177 178 178 /* Note: these only return true/false, not a signed return value! */ 179 - static inline int memcmp_fs(const void *s1, addr_t s2, size_t len) 179 + static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len) 180 180 { 181 - u8 diff; 181 + bool diff; 182 182 asm volatile("fs; repe; cmpsb; setnz %0" 183 183 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 184 184 return diff; 185 185 } 186 - static inline int memcmp_gs(const void *s1, addr_t s2, size_t len) 186 + static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len) 187 187 { 188 - u8 diff; 188 + bool diff; 189 189 asm volatile("gs; repe; cmpsb; setnz %0" 190 190 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 191 191 return diff;
+1 -1
arch/x86/boot/string.c
··· 17 17 18 18 int memcmp(const void *s1, const void *s2, size_t len) 19 19 { 20 - u8 diff; 20 + bool diff; 21 21 asm("repe; cmpsb; setnz %0" 22 22 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 23 23 return diff;
+3 -3
arch/x86/include/asm/apm.h
··· 45 45 : "memory", "cc"); 46 46 } 47 47 48 - static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, 49 - u32 ecx_in, u32 *eax) 48 + static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, 49 + u32 ecx_in, u32 *eax) 50 50 { 51 51 int cx, dx, si; 52 - u8 error; 52 + bool error; 53 53 54 54 /* 55 55 * N.B. We do NOT need a cld after the BIOS call
+8 -8
arch/x86/include/asm/archrandom.h
··· 43 43 #ifdef CONFIG_ARCH_RANDOM 44 44 45 45 /* Instead of arch_get_random_long() when alternatives haven't run. */ 46 - static inline int rdrand_long(unsigned long *v) 46 + static inline bool rdrand_long(unsigned long *v) 47 47 { 48 48 int ok; 49 49 asm volatile("1: " RDRAND_LONG "\n\t" ··· 53 53 "2:" 54 54 : "=r" (ok), "=a" (*v) 55 55 : "0" (RDRAND_RETRY_LOOPS)); 56 - return ok; 56 + return !!ok; 57 57 } 58 58 59 59 /* A single attempt at RDSEED */ 60 60 static inline bool rdseed_long(unsigned long *v) 61 61 { 62 - unsigned char ok; 62 + bool ok; 63 63 asm volatile(RDSEED_LONG "\n\t" 64 64 "setc %0" 65 65 : "=qm" (ok), "=a" (*v)); ··· 67 67 } 68 68 69 69 #define GET_RANDOM(name, type, rdrand, nop) \ 70 - static inline int name(type *v) \ 70 + static inline bool name(type *v) \ 71 71 { \ 72 72 int ok; \ 73 73 alternative_io("movl $0, %0\n\t" \ ··· 80 80 X86_FEATURE_RDRAND, \ 81 81 ASM_OUTPUT2("=r" (ok), "=a" (*v)), \ 82 82 "0" (RDRAND_RETRY_LOOPS)); \ 83 - return ok; \ 83 + return !!ok; \ 84 84 } 85 85 86 86 #define GET_SEED(name, type, rdseed, nop) \ 87 - static inline int name(type *v) \ 87 + static inline bool name(type *v) \ 88 88 { \ 89 - unsigned char ok; \ 89 + bool ok; \ 90 90 alternative_io("movb $0, %0\n\t" \ 91 91 nop, \ 92 92 rdseed "\n\t" \ ··· 119 119 120 120 #else 121 121 122 - static inline int rdrand_long(unsigned long *v) 122 + static inline bool rdrand_long(unsigned long *v) 123 123 { 124 124 return 0; 125 125 }
+4 -4
arch/x86/include/asm/atomic.h
··· 75 75 * true if the result is zero, or false for all 76 76 * other cases. 77 77 */ 78 - static __always_inline int atomic_sub_and_test(int i, atomic_t *v) 78 + static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) 79 79 { 80 80 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); 81 81 } ··· 112 112 * returns true if the result is 0, or false for all other 113 113 * cases. 114 114 */ 115 - static __always_inline int atomic_dec_and_test(atomic_t *v) 115 + static __always_inline bool atomic_dec_and_test(atomic_t *v) 116 116 { 117 117 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); 118 118 } ··· 125 125 * and returns true if the result is zero, or false for all 126 126 * other cases. 127 127 */ 128 - static __always_inline int atomic_inc_and_test(atomic_t *v) 128 + static __always_inline bool atomic_inc_and_test(atomic_t *v) 129 129 { 130 130 GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e"); 131 131 } ··· 139 139 * if the result is negative, or false when 140 140 * result is greater than or equal to zero. 141 141 */ 142 - static __always_inline int atomic_add_negative(int i, atomic_t *v) 142 + static __always_inline bool atomic_add_negative(int i, atomic_t *v) 143 143 { 144 144 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s"); 145 145 }
+5 -5
arch/x86/include/asm/atomic64_64.h
··· 70 70 * true if the result is zero, or false for all 71 71 * other cases. 72 72 */ 73 - static inline int atomic64_sub_and_test(long i, atomic64_t *v) 73 + static inline bool atomic64_sub_and_test(long i, atomic64_t *v) 74 74 { 75 75 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e"); 76 76 } ··· 109 109 * returns true if the result is 0, or false for all other 110 110 * cases. 111 111 */ 112 - static inline int atomic64_dec_and_test(atomic64_t *v) 112 + static inline bool atomic64_dec_and_test(atomic64_t *v) 113 113 { 114 114 GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e"); 115 115 } ··· 122 122 * and returns true if the result is zero, or false for all 123 123 * other cases. 124 124 */ 125 - static inline int atomic64_inc_and_test(atomic64_t *v) 125 + static inline bool atomic64_inc_and_test(atomic64_t *v) 126 126 { 127 127 GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e"); 128 128 } ··· 136 136 * if the result is negative, or false when 137 137 * result is greater than or equal to zero. 138 138 */ 139 - static inline int atomic64_add_negative(long i, atomic64_t *v) 139 + static inline bool atomic64_add_negative(long i, atomic64_t *v) 140 140 { 141 141 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s"); 142 142 } ··· 180 180 * Atomically adds @a to @v, so long as it was not @u. 181 181 * Returns the old value of @v. 182 182 */ 183 - static inline int atomic64_add_unless(atomic64_t *v, long a, long u) 183 + static inline bool atomic64_add_unless(atomic64_t *v, long a, long u) 184 184 { 185 185 long c, old; 186 186 c = atomic64_read(v);
+14 -14
arch/x86/include/asm/bitops.h
··· 201 201 * This operation is atomic and cannot be reordered. 202 202 * It also implies a memory barrier. 203 203 */ 204 - static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr) 204 + static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) 205 205 { 206 206 GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c"); 207 207 } ··· 213 213 * 214 214 * This is the same as test_and_set_bit on x86. 215 215 */ 216 - static __always_inline int 216 + static __always_inline bool 217 217 test_and_set_bit_lock(long nr, volatile unsigned long *addr) 218 218 { 219 219 return test_and_set_bit(nr, addr); ··· 228 228 * If two examples of this operation race, one can appear to succeed 229 229 * but actually fail. You must protect multiple accesses with a lock. 230 230 */ 231 - static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr) 231 + static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) 232 232 { 233 - unsigned char oldbit; 233 + bool oldbit; 234 234 235 235 asm("bts %2,%1\n\t" 236 236 "setc %0" ··· 247 247 * This operation is atomic and cannot be reordered. 248 248 * It also implies a memory barrier. 
249 249 */ 250 - static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr) 250 + static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) 251 251 { 252 252 GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c"); 253 253 } ··· 268 268 * accessed from a hypervisor on the same CPU if running in a VM: don't change 269 269 * this without also updating arch/x86/kernel/kvm.c 270 270 */ 271 - static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr) 271 + static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) 272 272 { 273 - unsigned char oldbit; 273 + bool oldbit; 274 274 275 275 asm volatile("btr %2,%1\n\t" 276 276 "setc %0" ··· 280 280 } 281 281 282 282 /* WARNING: non atomic and it can be reordered! */ 283 - static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr) 283 + static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) 284 284 { 285 - unsigned char oldbit; 285 + bool oldbit; 286 286 287 287 asm volatile("btc %2,%1\n\t" 288 288 "setc %0" ··· 300 300 * This operation is atomic and cannot be reordered. 301 301 * It also implies a memory barrier. 
302 302 */ 303 - static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr) 303 + static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) 304 304 { 305 305 GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c"); 306 306 } 307 307 308 - static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) 308 + static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) 309 309 { 310 310 return ((1UL << (nr & (BITS_PER_LONG-1))) & 311 311 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; 312 312 } 313 313 314 - static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr) 314 + static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr) 315 315 { 316 - unsigned char oldbit; 316 + bool oldbit; 317 317 318 318 asm volatile("bt %2,%1\n\t" 319 319 "setc %0" ··· 329 329 * @nr: bit number to test 330 330 * @addr: Address to start counting from 331 331 */ 332 - static int test_bit(int nr, const volatile unsigned long *addr); 332 + static bool test_bit(int nr, const volatile unsigned long *addr); 333 333 #endif 334 334 335 335 #define test_bit(nr, addr) \
+4 -4
arch/x86/include/asm/local.h
··· 50 50 * true if the result is zero, or false for all 51 51 * other cases. 52 52 */ 53 - static inline int local_sub_and_test(long i, local_t *l) 53 + static inline bool local_sub_and_test(long i, local_t *l) 54 54 { 55 55 GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e"); 56 56 } ··· 63 63 * returns true if the result is 0, or false for all other 64 64 * cases. 65 65 */ 66 - static inline int local_dec_and_test(local_t *l) 66 + static inline bool local_dec_and_test(local_t *l) 67 67 { 68 68 GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e"); 69 69 } ··· 76 76 * and returns true if the result is zero, or false for all 77 77 * other cases. 78 78 */ 79 - static inline int local_inc_and_test(local_t *l) 79 + static inline bool local_inc_and_test(local_t *l) 80 80 { 81 81 GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e"); 82 82 } ··· 90 90 * if the result is negative, or false when 91 91 * result is greater than or equal to zero. 92 92 */ 93 - static inline int local_add_negative(long i, local_t *l) 93 + static inline bool local_add_negative(long i, local_t *l) 94 94 { 95 95 GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s"); 96 96 }
+4 -4
arch/x86/include/asm/percpu.h
··· 510 510 /* This is not atomic against other CPUs -- CPU preemption needs to be off */ 511 511 #define x86_test_and_clear_bit_percpu(bit, var) \ 512 512 ({ \ 513 - unsigned char old__; \ 513 + bool old__; \ 514 514 asm volatile("btr %2,"__percpu_arg(1)"\n\tsetc %0" \ 515 515 : "=qm" (old__), "+m" (var) \ 516 516 : "dIr" (bit)); \ 517 517 old__; \ 518 518 }) 519 519 520 - static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr, 520 + static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr, 521 521 const unsigned long __percpu *addr) 522 522 { 523 523 unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG; ··· 529 529 #endif 530 530 } 531 531 532 - static inline int x86_this_cpu_variable_test_bit(int nr, 532 + static inline bool x86_this_cpu_variable_test_bit(int nr, 533 533 const unsigned long __percpu *addr) 534 534 { 535 - unsigned char oldbit; 535 + bool oldbit; 536 536 537 537 asm volatile("bt "__percpu_arg(2)",%1\n\t" 538 538 "setc %0"
+2 -2
arch/x86/include/asm/rmwcc.h
··· 23 23 24 24 #define __GEN_RMWcc(fullop, var, cc, ...) \ 25 25 do { \ 26 - char c; \ 26 + bool c; \ 27 27 asm volatile (fullop "; set" cc " %1" \ 28 28 : "+m" (var), "=qm" (c) \ 29 29 : __VA_ARGS__ : "memory"); \ 30 - return c != 0; \ 30 + return c; \ 31 31 } while (0) 32 32 33 33 #define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+9 -8
arch/x86/include/asm/rwsem.h
··· 77 77 /* 78 78 * trylock for reading -- returns 1 if successful, 0 if contention 79 79 */ 80 - static inline int __down_read_trylock(struct rw_semaphore *sem) 80 + static inline bool __down_read_trylock(struct rw_semaphore *sem) 81 81 { 82 82 long result, tmp; 83 83 asm volatile("# beginning __down_read_trylock\n\t" ··· 93 93 : "+m" (sem->count), "=&a" (result), "=&r" (tmp) 94 94 : "i" (RWSEM_ACTIVE_READ_BIAS) 95 95 : "memory", "cc"); 96 - return result >= 0 ? 1 : 0; 96 + return result >= 0; 97 97 } 98 98 99 99 /* ··· 134 134 /* 135 135 * trylock for writing -- returns 1 if successful, 0 if contention 136 136 */ 137 - static inline int __down_write_trylock(struct rw_semaphore *sem) 137 + static inline bool __down_write_trylock(struct rw_semaphore *sem) 138 138 { 139 - long result, tmp; 139 + bool result; 140 + long tmp0, tmp1; 140 141 asm volatile("# beginning __down_write_trylock\n\t" 141 142 " mov %0,%1\n\t" 142 143 "1:\n\t" ··· 145 144 /* was the active mask 0 before? */ 146 145 " jnz 2f\n\t" 147 146 " mov %1,%2\n\t" 148 - " add %3,%2\n\t" 147 + " add %4,%2\n\t" 149 148 LOCK_PREFIX " cmpxchg %2,%0\n\t" 150 149 " jnz 1b\n\t" 151 150 "2:\n\t" 152 - " sete %b1\n\t" 153 - " movzbl %b1, %k1\n\t" 151 + " sete %3\n\t" 154 152 "# ending __down_write_trylock\n\t" 155 - : "+m" (sem->count), "=&a" (result), "=&r" (tmp) 153 + : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1), 154 + "=qm" (result) 156 155 : "er" (RWSEM_ACTIVE_WRITE_BIAS) 157 156 : "memory", "cc"); 158 157 return result;
+6 -6
include/linux/random.h
··· 95 95 #ifdef CONFIG_ARCH_RANDOM 96 96 # include <asm/archrandom.h> 97 97 #else 98 - static inline int arch_get_random_long(unsigned long *v) 98 + static inline bool arch_get_random_long(unsigned long *v) 99 99 { 100 100 return 0; 101 101 } 102 - static inline int arch_get_random_int(unsigned int *v) 102 + static inline bool arch_get_random_int(unsigned int *v) 103 103 { 104 104 return 0; 105 105 } 106 - static inline int arch_has_random(void) 106 + static inline bool arch_has_random(void) 107 107 { 108 108 return 0; 109 109 } 110 - static inline int arch_get_random_seed_long(unsigned long *v) 110 + static inline bool arch_get_random_seed_long(unsigned long *v) 111 111 { 112 112 return 0; 113 113 } 114 - static inline int arch_get_random_seed_int(unsigned int *v) 114 + static inline bool arch_get_random_seed_int(unsigned int *v) 115 115 { 116 116 return 0; 117 117 } 118 - static inline int arch_has_random_seed(void) 118 + static inline bool arch_has_random_seed(void) 119 119 { 120 120 return 0; 121 121 }