/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))

#define BYTES_TO_BITS(nb)	((nb) * BITS_PER_BYTE)

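/*
 * Usage sketch (illustrative; MY_NBITS is a hypothetical constant):
 * BITS_TO_LONGS() converts a bit count into the number of longs needed
 * to back it, which is how fixed-size bitmaps are typically sized:
 *
 *	#define MY_NBITS 100
 *	unsigned long my_bitmap[BITS_TO_LONGS(MY_NBITS)];
 *
 * On a 64-bit kernel this rounds 100 / 64 up and reserves two longs.
 */
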
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Defined here because these may be needed by architecture-specific static
 * inlines.
 */

#include <asm-generic/bitops/generic-non-atomic.h>

/*
 * Many architecture-specific non-atomic bitops contain inline asm code,
 * so the compiler can't optimize them to compile-time expressions or
 * constants. By contrast, the generic_*() helpers are defined in pure C
 * and the compiler optimizes them just fine.
 * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)`
 * effectively equal to `unsigned long foo = BIT(BAR)`, pick the generic C
 * alternative when the arguments can be resolved at compile time. That
 * expression is itself compile-time constant and doesn't change behavior
 * in the remaining cases.
 * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
 * passing a bitmap from .bss or .data (-> `!!addr` is always true).
 */
#define bitop(op, nr, addr)						\
	((__builtin_constant_p(nr) &&					\
	  __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) &&	\
	  (uintptr_t)(addr) != (uintptr_t)NULL &&			\
	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
	 const##op(nr, addr) : op(nr, addr))

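/*
 * For example (illustrative sketch; FLAG is a hypothetical compile-time
 * constant):
 *
 *	unsigned long flags = 0;
 *	__set_bit(FLAG, &flags);
 *
 * expands to the const___set_bit() path and folds to the equivalent of
 * `flags = BIT(FLAG)`, while a runtime bit number falls back to the
 * arch-optimized ___set_bit().
 */
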
/*
 * The following macros are non-atomic versions of their non-underscored
 * counterparts.
 */
#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)

#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr)	bitop(_test_bit_acquire, nr, addr)

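/*
 * Usage sketch (illustrative): the double-underscore forms are the
 * non-atomic variants and are safe when the bitmap is not accessed
 * concurrently, e.g. during single-threaded setup. do_something() is a
 * hypothetical callback:
 *
 *	unsigned long map[BITS_TO_LONGS(64)] = { 0 };
 *
 *	__set_bit(5, map);
 *	if (__test_and_clear_bit(5, map))
 *		do_something();
 *
 * Here __test_and_clear_bit() returns true (bit 5 was just set) and
 * clears it, so a subsequent test_bit(5, map) evaluates to false.
 */
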
/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope.
 */
#include <asm/bitops.h>

/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name)						\
	static_assert(__same_type(arch_##name, generic_##name) &&	\
		      __same_type(const_##name, generic_##name) &&	\
		      __same_type(_##name, generic_##name))

__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);
__check_bitop_pr(test_bit_acquire);

#undef __check_bitop_pr

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}

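/*
 * Worked examples (illustrative): get_bitmask_order(5) == 3, since
 * fls(5) == 3 and a 3-bit mask (0b111) covers 5; hweight_long(0xF0UL) == 4,
 * since four bits are set regardless of the word size.
 */
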
/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

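/*
 * Worked example (illustrative): rotations wrap the shifted-out bits
 * around to the other end, e.g.:
 *
 *	rol32(0x80000001, 1) == 0x00000003
 *	ror32(0x00000001, 1) == 0x80000000
 *
 * The 64-, 16- and 8-bit variants below behave the same way at their
 * respective widths.
 */
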
/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

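/*
 * Worked example (illustrative): with bit 7 as the sign bit,
 * sign_extend32(0x80, 7) == (__s32)0xFFFFFF80 == -128, while
 * sign_extend32(0x7F, 7) == 127 (positive values are unchanged).
 */
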
static inline unsigned int fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	if (count == 0)
		return -1;

	return fls(--count);
}

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * Same as get_count_order(), but with an unsigned long parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	return (int)fls_long(--l);
}

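/*
 * Worked examples (illustrative): get_count_order(16) == 4 (16 is already
 * a power of two), while get_count_order(17) == 5 (rounded up to 32).
 */
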
/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned int __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}

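/*
 * Worked example (illustrative): __ffs64(0x100000000ULL) == 32, since the
 * low 32 bits are zero and bit 32 is the least significant set bit.
 */
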
/**
 * fns - find N'th set bit in a word
 * @word: The word to search
 * @n: Bit to find
 *
 * Returns the bit number of the @n'th set bit, or BITS_PER_LONG if @word
 * has fewer than @n + 1 bits set.
 */
static inline unsigned int fns(unsigned long word, unsigned int n)
{
	while (word && n--)
		word &= word - 1;

	return word ? __ffs(word) : BITS_PER_LONG;
}

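/*
 * Worked example (illustrative): for word == 0b1010, fns(word, 0) == 1
 * and fns(word, 1) == 3; fns(word, 2) == BITS_PER_LONG because only two
 * bits are set.
 */
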
/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
#define assign_bit(nr, addr, value)					\
	((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr)))

#define __assign_bit(nr, addr, value)					\
	((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr)))

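/*
 * Usage sketch (illustrative; `enabled', `flags' and FEATURE_BIT are
 * hypothetical): assign_bit() avoids an explicit branch at the call site
 * when propagating a boolean into a bitmap:
 *
 *	assign_bit(FEATURE_BIT, flags, enabled);
 *
 * This is equivalent to set_bit() when `enabled' is true and clear_bit()
 * otherwise; __assign_bit() is the non-atomic counterpart.
 */
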
/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)					\
	({							\
		typecheck_pointer(*(addr));			\
		__set_bit(nr, (unsigned long *)(addr));		\
	})

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		__clear_bit(nr, (unsigned long *)(addr));	\
	})

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *		...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		test_bit(nr, (unsigned long *)(addr));		\
	})

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)					\
({									\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);		\
	typeof(*(ptr)) old__, new__;					\
									\
	old__ = READ_ONCE(*(ptr));					\
	do {								\
		new__ = (old__ & ~mask__) | bits__;			\
	} while (!try_cmpxchg(ptr, &old__, new__));			\
									\
	old__;								\
})
#endif

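/*
 * Usage sketch (illustrative): set_mask_bits() atomically replaces the
 * bits selected by @mask with @bits and returns the old value. Here a
 * hypothetical 4-bit state field in `word' is rewritten in one cmpxchg
 * loop, assuming `new_state' has no bits outside the mask:
 *
 *	unsigned long old;
 *
 *	old = set_mask_bits(&word, GENMASK(3, 0), new_state);
 */
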
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)				\
({									\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);	\
	typeof(*(ptr)) old__, new__;					\
									\
	old__ = READ_ONCE(*(ptr));					\
	do {								\
		if (old__ & test__)					\
			break;						\
		new__ = old__ & ~clear__;				\
	} while (!try_cmpxchg(ptr, &old__, new__));			\
									\
	!(old__ & test__);						\
})
#endif

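/*
 * Usage sketch (illustrative; `word', BUSY and PENDING are hypothetical
 * flag masks): bit_clear_unless() atomically clears the `clear' bits
 * unless any `test' bit is set, and returns whether the clear happened:
 *
 *	if (bit_clear_unless(&word, PENDING, BUSY))
 *		finish_work();
 */
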
#endif /* __KERNEL__ */
#endif