Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
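
/*
 * Example (illustrative, assuming BITS_PER_LONG == 64): on little-endian,
 * aligned_byte_mask(2) == 0x000000000000ffff; on big-endian it is
 * 0xffff000000000000. In both cases the mask covers the first two bytes
 * in memory order once the word has been loaded.
 */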

#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
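
/*
 * Example (illustrative): with BITS_PER_BYTE == 8 and a 64-bit long,
 * BITS_PER_TYPE(long) == 64, so BITS_TO_LONGS(65) == 2 and
 * BITS_TO_BYTES(9) == 2; all of these macros round up.
 */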

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
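
/*
 * The __sw_hweight*() routines above are the generic software
 * population-count ("Hamming weight") implementations from lib/hweight.c;
 * architectures without a usable popcount instruction fall back to them.
 */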

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}
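
/*
 * Example (illustrative): fls() returns the 1-based index of the most
 * significant set bit, so get_bitmask_order(5) == 3 and
 * get_bitmask_order(0x20) == 6; a mask of that many low bits covers the
 * value.
 */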

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
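
/*
 * Example (illustrative): hweight_long(0xf0UL) == 4, i.e. the number of
 * set bits in the word; the sizeof() check selects hweight32() or
 * hweight64() at compile time.
 */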

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
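
/*
 * Example (illustrative): rol32(0x80000001, 1) == 0x00000003 and
 * ror32(0x00000003, 1) == 0x80000001. Masking the shift count with
 * "& 63" / "& 31" / "& 15" / "& 7" keeps both shift amounts in range,
 * so the rotates are well-defined for any shift value.
 */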

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
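
/*
 * Example (illustrative): sign_extend32(0x80, 7) == -128 and
 * sign_extend32(0x7f, 7) == 127, i.e. bit 7 is treated as the sign bit
 * of an 8-bit two's complement value.
 */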

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}
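
/*
 * Example (illustrative): fls_long() picks fls() or fls64() based on the
 * width of unsigned long, so fls_long(0) == 0 and fls_long(0x100) == 9
 * on both 32-bit and 64-bit builds.
 */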

static inline int get_count_order(unsigned int count)
{
	if (count == 0)
		return -1;

	return fls(--count);
}
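
/*
 * Example (illustrative): get_count_order() returns the order after
 * rounding @count up to a power of two, so get_count_order(4) == 2 and
 * get_count_order(5) == 3.
 */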

/**
 * get_count_order_long - get order after rounding @l up to a power of 2
 * @l: parameter
 *
 * It is the same as get_count_order() but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	return (int)fls_long(--l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
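
/*
 * Example (illustrative): __ffs64(0x100000000ULL) == 32; like __ffs(),
 * the returned bit index is 0-based, and the result is undefined for a
 * zero argument.
 */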

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}
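
/*
 * Example (illustrative; "flags" and "some_condition" are hypothetical):
 *	DECLARE_BITMAP(flags, 8);
 *	assign_bit(3, flags, some_condition);
 * which atomically sets bit 3 when some_condition is true and atomically
 * clears it otherwise.
 */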

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}
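
/*
 * __assign_bit() is the non-atomic variant of assign_bit(): the
 * double-underscore __set_bit()/__clear_bit() helpers do not perform an
 * atomic read-modify-write, so callers must ensure the bitmap is not
 * modified concurrently.
 */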

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)					\
	({							\
		typecheck_pointer(*(addr));			\
		__set_bit(nr, (unsigned long *)(addr));		\
	})

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		__clear_bit(nr, (unsigned long *)(addr));	\
	})

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *		...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		test_bit(nr, (unsigned long *)(addr));		\
	})

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)				\
({								\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = (old__ & ~mask__) | bits__;		\
	} while (cmpxchg(ptr, old__, new__) != old__);		\
								\
	old__;							\
})
#endif
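
/*
 * Example (illustrative; "word" and "old" are hypothetical): set_mask_bits()
 * atomically replaces the bits selected by @mask with @bits and returns
 * the previous value, so
 *	old = set_mask_bits(&word, 0xffUL, 0x11UL);
 * rewrites the low byte of "word" to 0x11 in one cmpxchg() retry loop.
 */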

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)			\
({								\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
	typeof(*(ptr)) old__, new__;				\
								\
	do {							\
		old__ = READ_ONCE(*(ptr));			\
		new__ = old__ & ~clear__;			\
	} while (!(old__ & test__) &&				\
		 cmpxchg(ptr, old__, new__) != old__);		\
								\
	!(old__ & test__);					\
})
#endif
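
/*
 * Example (illustrative; "flags" is a hypothetical word of flag bits):
 *	cleared = bit_clear_unless(&flags, BIT(0), BIT(1));
 * clears bit 0 only if bit 1 is not set, and evaluates to true when the
 * clear actually happened.
 */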

#endif /* __KERNEL__ */
#endif