Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib: mul_u64_u64_div_u64(): optimise multiply on 32bit x86

gcc generates horrid code for both ((u64)u32_a * u32_b) and (u64_a +
u32_b). As well as the extra instructions it can generate a lot of spills
to stack (including spills of constant zeros and even multiplies by
constant zero).

mul_u32_u32() already exists to optimise the multiply. Add a similar
add_u64_u32() for the addition. Disable both for clang - it generates
better code without them.

Move the 64x64 => 128 multiply into a static inline helper function for
code clarity. No need for the a/b_hi/lo variables, the implicit casts on
the function calls do the work for us. Should have minimal effect on the
generated code.

Use mul_u32_u32() and add_u64_u32() in the 64x64 => 128 multiply in
mul_u64_add_u64_div_u64().

Link: https://lkml.kernel.org/r/20251105201035.64043-8-david.laight.linux@gmail.com
Signed-off-by: David Laight <david.laight.linux@gmail.com>
Reviewed-by: Nicolas Pitre <npitre@baylibre.com>
Cc: Biju Das <biju.das.jz@bp.renesas.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Li RongQing <lirongqing@baidu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

David Laight and committed by
Andrew Morton
630f96a6 f0bff2eb

+56 -14
+19
arch/x86/include/asm/div64.h
··· 60 60 } 61 61 #define div_u64_rem div_u64_rem 62 62 63 + /* 64 + * gcc tends to zero extend 32bit values and do full 64bit maths. 65 + * Define asm functions that avoid this. 66 + * (clang generates better code for the C versions.) 67 + */ 68 + #ifndef __clang__ 63 69 static inline u64 mul_u32_u32(u32 a, u32 b) 64 70 { 65 71 u32 high, low; ··· 76 70 return low | ((u64)high) << 32; 77 71 } 78 72 #define mul_u32_u32 mul_u32_u32 73 + 74 + static inline u64 add_u64_u32(u64 a, u32 b) 75 + { 76 + u32 high = a >> 32, low = a; 77 + 78 + asm ("addl %[b], %[low]; adcl $0, %[high]" 79 + : [low] "+r" (low), [high] "+r" (high) 80 + : [b] "rm" (b) ); 81 + 82 + return low | (u64)high << 32; 83 + } 84 + #define add_u64_u32 add_u64_u32 85 + #endif 79 86 80 87 /* 81 88 * __div64_32() is never called on x86, so prevent the
+11
include/linux/math64.h
··· 158 158 } 159 159 #endif 160 160 161 + #ifndef add_u64_u32 162 + /* 163 + * Many a GCC version also messes this up. 164 + * Zero extending b and then spilling everything to stack. 165 + */ 166 + static inline u64 add_u64_u32(u64 a, u32 b) 167 + { 168 + return a + b; 169 + } 170 + #endif 171 + 161 172 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) 162 173 163 174 #ifndef mul_u64_u32_shr
+26 -14
lib/math/div64.c
··· 186 186 #endif 187 187 188 188 #if !defined(mul_u64_add_u64_div_u64) || defined(test_mul_u64_add_u64_div_u64) 189 - u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d) 190 - { 189 + 190 + #define mul_add(a, b, c) add_u64_u32(mul_u32_u32(a, b), c) 191 + 191 192 #if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64) 192 193 194 + static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c) 195 + { 193 196 /* native 64x64=128 bits multiplication */ 194 197 u128 prod = (u128)a * b + c; 195 - u64 n_lo = prod, n_hi = prod >> 64; 198 + 199 + *p_lo = prod; 200 + return prod >> 64; 201 + } 196 202 197 203 #else 198 204 199 - /* perform a 64x64=128 bits multiplication manually */ 200 - u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32; 205 + static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c) 206 + { 207 + /* perform a 64x64=128 bits multiplication in 32bit chunks */ 201 208 u64 x, y, z; 202 209 203 210 /* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */ 204 - x = (u64)a_lo * b_lo + (u32)c; 205 - y = (u64)a_lo * b_hi + (u32)(c >> 32); 206 - y += (u32)(x >> 32); 207 - z = (u64)a_hi * b_hi + (u32)(y >> 32); 208 - y = (u64)a_hi * b_lo + (u32)y; 209 - z += (u32)(y >> 32); 210 - x = (y << 32) + (u32)x; 211 - 212 - u64 n_lo = x, n_hi = z; 211 + x = mul_add(a, b, c); 212 + y = mul_add(a, b >> 32, c >> 32); 213 + y = add_u64_u32(y, x >> 32); 214 + z = mul_add(a >> 32, b >> 32, y >> 32); 215 + y = mul_add(a >> 32, b, y); 216 + *p_lo = (y << 32) + (u32)x; 217 + return add_u64_u32(z, y >> 32); 218 + } 213 219 214 220 #endif 221 + 222 + u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d) 223 + { 224 + u64 n_lo, n_hi; 225 + 226 + n_hi = mul_u64_u64_add_u64(&n_lo, a, b, c); 215 227 216 228 if (!n_hi) 217 229 return div64_u64(n_lo, d);