Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib: add fast path for find_next_*_bit()

Similarly to the bitmap functions, find_next_*_bit() users will benefit if
we handle the case of bitmaps that fit into a single word inline. In the
very best case, the compiler may replace a function call with a few
instructions.

This is a quite typical find_next_bit() user:

unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);

Currently, on ARM64 the generated code looks like this:
0000000000000000 <cpumask_next>:
0: a9bf7bfd stp x29, x30, [sp, #-16]!
4: 11000402 add w2, w0, #0x1
8: aa0103e0 mov x0, x1
c: d2800401 mov x1, #0x40 // #64
10: 910003fd mov x29, sp
14: 93407c42 sxtw x2, w2
18: 94000000 bl 0 <find_next_bit>
1c: a8c17bfd ldp x29, x30, [sp], #16
20: d65f03c0 ret
24: d503201f nop

After applying this patch:
0000000000000140 <cpumask_next>:
140: 11000400 add w0, w0, #0x1
144: 93407c00 sxtw x0, w0
148: f100fc1f cmp x0, #0x3f
14c: 54000168 b.hi 178 <cpumask_next+0x38> // b.pmore
150: f9400023 ldr x3, [x1]
154: 92800001 mov x1, #0xffffffffffffffff // #-1
158: 9ac02020 lsl x0, x1, x0
15c: 52800802 mov w2, #0x40 // #64
160: 8a030001 and x1, x0, x3
164: dac00020 rbit x0, x1
168: f100003f cmp x1, #0x0
16c: dac01000 clz x0, x0
170: 1a800040 csel w0, w2, w0, eq // eq = none
174: d65f03c0 ret
178: 52800800 mov w0, #0x40 // #64
17c: d65f03c0 ret

The find_next_bit() call is replaced with 6 inline instructions, whereas
find_next_bit() itself is 41 instructions plus function-call overhead.

Despite the inlining, scripts/bloat-o-meter reports a smaller .text size
after applying the series:
add/remove: 11/9 grow/shrink: 233/176 up/down: 5780/-6768 (-988)

Link: https://lkml.kernel.org/r/20210401003153.97325-10-yury.norov@gmail.com
Signed-off-by: Yury Norov <yury.norov@gmail.com>
Acked-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Acked-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Cc: Alexey Klimov <aklimov@redhat.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Jianpeng Ma <jianpeng.ma@intel.com>
Cc: Joe Perches <joe@perches.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Stefano Brivio <sbrivio@redhat.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Wolfram Sang <wsa+renesas@sang-engineering.com>
Cc: Yoshinori Sato <ysato@users.osdn.me>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Yury Norov and committed by Linus Torvalds
(commit 277a20a4, parent ea81c1ef)

Changed files (+51 lines total):

include/asm-generic/bitops/find.h (+30 lines)
@@ find_next_bit @@
 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
 		unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = *addr & GENMASK(size - 1, offset);
+		return val ? __ffs(val) : size;
+	}
+
 	return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
 }
 #endif

@@ find_next_and_bit @@
 		const unsigned long *addr2, unsigned long size,
 		unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+		return val ? __ffs(val) : size;
+	}
+
 	return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
 }
 #endif

@@ find_next_zero_bit @@
 unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
 		unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = *addr | ~GENMASK(size - 1, offset);
+		return val == ~0UL ? size : ffz(val);
+	}
+
 	return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
 }
 #endif
include/asm-generic/bitops/le.h (+21 lines)
@@ includes @@
 #include <asm-generic/bitops/find.h>
 #include <asm/types.h>
 #include <asm/byteorder.h>
+#include <linux/swab.h>

 #if defined(__LITTLE_ENDIAN)

@@ find_next_zero_bit_le @@
 unsigned long find_next_zero_bit_le(const void *addr, unsigned
 		long size, unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val = *(const unsigned long *)addr;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = swab(val) | ~GENMASK(size - 1, offset);
+		return val == ~0UL ? size : ffz(val);
+	}
+
 	return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
 }
 #endif

@@ find_next_bit_le @@
 unsigned long find_next_bit_le(const void *addr, unsigned
 		long size, unsigned long offset)
 {
+	if (small_const_nbits(size)) {
+		unsigned long val = *(const unsigned long *)addr;
+
+		if (unlikely(offset >= size))
+			return size;
+
+		val = swab(val) & GENMASK(size - 1, offset);
+		return val ? __ffs(val) : size;
+	}
+
 	return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
 }
 #endif