tools/include/linux/bitops.h at v6.12
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_BITOPS_H_
#define _TOOLS_LINUX_BITOPS_H_

#include <asm/types.h>
#include <limits.h>
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif

#ifndef BITS_PER_LONG
# define BITS_PER_LONG __WORDSIZE
#endif
#include <linux/bits.h>
#include <linux/compiler.h>

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

#define BYTES_TO_BITS(nb)	((nb) * BITS_PER_BYTE)

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Defined here because those may be needed by architecture-specific static
 * inlines.
 */

#define bitop(op, nr, addr)	\
	op(nr, addr)

#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 *
 * XXX: this needs to be asm/bitops.h, when we get to per arch optimizations
 */
#include <asm-generic/bitops.h>

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
	for ((bit) = find_first_zero_bit((addr), (size));	\
	     (bit) < (size);					\
	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

static inline unsigned int fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

#endif
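
/*
 * Editor's illustrative sketch, not part of the original header: a minimal
 * program exercising the non-atomic bitmap helpers above. It assumes the
 * kernel tools/ build environment (e.g. building alongside perf with
 * -Itools/include on the include path), so that <linux/bitops.h> and its
 * dependencies resolve; it would live in its own hypothetical source file,
 * not in this header.
 */
#include <stdio.h>
#include <linux/bitops.h>

int main(void)
{
	/* a 128-bit map needs BITS_TO_LONGS(128) longs of backing store */
	unsigned long bitmap[BITS_TO_LONGS(128)] = { 0 };
	unsigned int bit;

	__set_bit(3, bitmap);	/* non-atomic set */
	__set_bit(64, bitmap);	/* lands in the second long */
	__clear_bit(3, bitmap);

	if (test_bit(64, bitmap))
		printf("bit 64 is set\n");

	/* visits set bits in ascending order; prints only 64 here */
	for_each_set_bit(bit, bitmap, 128)
		printf("set: %u\n", bit);

	return 0;
}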
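
/*
 * A second hedged sketch, also an editor's addition and a separate
 * hypothetical program: the word-sized helpers from the end of the header.
 * Expected outputs are noted in comments; same build assumptions as above.
 */
#include <stdio.h>
#include <linux/bitops.h>

int main(void)
{
	/* hweight_long() picks hweight32() or hweight64() by word size */
	printf("%lu\n", hweight_long(0xf0UL));			/* 4 */

	/* fls_long(): 1-based index of the most significant set bit */
	printf("%u\n", fls_long(0x10UL));			/* 5 */

	/* rol32(): bits shifted out on the left re-enter on the right */
	printf("%#x\n", rol32(0x80000001U, 4));			/* 0x18 */

	/* sign_extend64(): treat bit 7 as the sign bit of an 8-bit field */
	printf("%lld\n", (long long)sign_extend64(0xffULL, 7));	/* -1 */

	return 0;
}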