/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_BITOPS_H
#define _ASM_RISCV_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error "Only <linux/bitops.h> can be included directly"
#endif /* _LINUX_BITOPS_H */

#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/bitsperlong.h>

#if !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) || defined(NO_ALTERNATIVE)
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>

#else
#define __HAVE_ARCH___FFS
#define __HAVE_ARCH___FLS
#define __HAVE_ARCH_FFS
#define __HAVE_ARCH_FLS

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>

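/*
 * Note: with the __HAVE_ARCH_* macros defined first, the asm-generic
 * headers above only contribute their generic_ffs()/generic_fls()/
 * generic___ffs()/generic___fls() helpers and skip their fallback
 * defines; the helpers are the runtime fallback used below when the CPU
 * does not implement Zbb. A sketch of the generic header structure:
 *
 *	static inline int generic_ffs(int x) { ... }
 *	#ifndef __HAVE_ARCH_FFS
 *	#define ffs(x) generic_ffs(x)
 *	#endif
 */
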
#include <asm/alternative-macros.h>
#include <asm/hwcap.h>

#if (BITS_PER_LONG == 64)
#define CTZW "ctzw "
#define CLZW "clzw "
#elif (BITS_PER_LONG == 32)
#define CTZW "ctz "
#define CLZW "clz "
#else
#error "Unexpected BITS_PER_LONG"
#endif

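/*
 * ffs()/fls() below operate on 32-bit values even on rv64, so 64-bit
 * kernels use the "w" (word) forms, which only consider the low 32 bits
 * of the source register; on rv32, plain ctz/clz already act on 32 bits.
 * For example, with a0 = 0x100000008 on rv64:
 *
 *	ctz  a1, a0	// a1 = 3  (trailing zeros over all 64 bits)
 *	ctzw a1, a0	// a1 = 3  (low word is 0x00000008)
 *	clzw a1, a0	// a1 = 28 (leading zeros of the low word)
 */
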
static __always_inline __attribute_const__ unsigned long variable__ffs(unsigned long word)
{
	if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
		return generic___ffs(word);

	asm volatile (".option push\n"
		      ".option arch,+zbb\n"
		      "ctz %0, %1\n"
		      ".option pop\n"
		      : "=r" (word) : "r" (word) :);

	return word;
}

/**
 * __ffs - find first set bit in a long word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
#define __ffs(word)				\
	(__builtin_constant_p(word) ?		\
	 (unsigned long)__builtin_ctzl(word) :	\
	 variable__ffs(word))

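/*
 * Example: __ffs(0x90UL) == 4, since bit 4 is the lowest set bit. For a
 * constant argument the compiler folds the __builtin_ctzl() branch at
 * compile time; otherwise variable__ffs() runs, costing a single ctz on
 * Zbb-capable hardware.
 */
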
static __always_inline __attribute_const__ unsigned long variable__fls(unsigned long word)
{
	if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
		return generic___fls(word);

	asm volatile (".option push\n"
		      ".option arch,+zbb\n"
		      "clz %0, %1\n"
		      ".option pop\n"
		      : "=r" (word) : "r" (word) :);

	return BITS_PER_LONG - 1 - word;
}

/**
 * __fls - find last set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
#define __fls(word)							\
	(__builtin_constant_p(word) ?					\
	 (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) :	\
	 variable__fls(word))

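/*
 * Example: __fls(0x90UL) == 7, the 0-based index of the highest set bit
 * (clz counts the leading zeros; BITS_PER_LONG - 1 - clz converts that
 * count back to a bit index). Contrast with fls() below, which is
 * 1-based and defined for a zero argument.
 */
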
static __always_inline __attribute_const__ int variable_ffs(int x)
{
	if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
		return generic_ffs(x);

	if (!x)
		return 0;

	asm volatile (".option push\n"
		      ".option arch,+zbb\n"
		      CTZW "%0, %1\n"
		      ".option pop\n"
		      : "=r" (x) : "r" (x) :);

	return x + 1;
}

/**
 * ffs - find first set bit in a word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs routines.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first set bit if
 * value is nonzero. The first (least significant) bit is at position 1.
 */
#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))

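/*
 * Examples: ffs(0) == 0, ffs(0x90) == 5 (the 1-based position of bit 4).
 * The explicit !x check above is what makes the zero case well defined:
 * Zbb's ctz-family instructions return the operand width (here 32) for a
 * zero input, and 32 + 1 would not be the required 0.
 */
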
static __always_inline __attribute_const__ int variable_fls(unsigned int x)
{
	if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
		return generic_fls(x);

	if (!x)
		return 0;

	asm volatile (".option push\n"
		      ".option arch,+zbb\n"
		      CLZW "%0, %1\n"
		      ".option pop\n"
		      : "=r" (x) : "r" (x) :);

	return 32 - x;
}

/**
 * fls - find last set bit in a word
 * @x: the word to search
 *
 * This is defined in a similar way to ffs, but returns the position of the
 * most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last set bit if
 * value is nonzero. The last (most significant) bit is at position 32.
 */
#define fls(x)							\
({								\
	typeof(x) x_ = (x);					\
	__builtin_constant_p(x_) ?				\
	 ((x_ != 0) ? (32 - __builtin_clz(x_)) : 0)		\
	:							\
	 variable_fls(x_);					\
})

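/*
 * Examples: fls(0) == 0, fls(0x90) == 8, fls(0x80000000U) == 32. The
 * statement expression evaluates @x exactly once, so arguments with side
 * effects behave as expected; __builtin_constant_p() then picks the
 * compile-time or runtime path.
 */
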
#endif /* !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) || defined(NO_ALTERNATIVE) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#if (BITS_PER_LONG == 64)
#define __AMO(op)	"amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op)	"amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif

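/*
 * __AMO() pastes the AMO mnemonic for a native-word access. On a 64-bit
 * kernel, for instance, __AMO(or) #ord with ord = .aqrl concatenates the
 * string literals "amoor.d" and ".aqrl" into the fully ordered
 * amoor.d.aqrl instruction; rv32 gets amoor.w.aqrl instead.
 */
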
#define __test_and_op_bit_ord(op, mod, nr, addr, ord)		\
({								\
	unsigned long __res, __mask;				\
	__mask = BIT_MASK(nr);					\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " %0, %2, %1"			\
		: "=r" (__res), "+A" (addr[BIT_WORD(nr)])	\
		: "r" (mod(__mask))				\
		: "memory");					\
	((__res & __mask) != 0);				\
})

#define __op_bit_ord(op, mod, nr, addr, ord)			\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " zero, %1, %0"			\
		: "+A" (addr[BIT_WORD(nr)])			\
		: "r" (mod(BIT_MASK(nr)))			\
		: "memory");

#define __test_and_op_bit(op, mod, nr, addr)			\
	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr)				\
	__op_bit_ord(op, mod, nr, addr, )

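/*
 * Putting the pieces together, arch_set_bit(nr, addr) below boils down
 * to roughly (rv64 shown):
 *
 *	amoor.d zero, mask, (word)	// mask = BIT_MASK(nr), word = addr + BIT_WORD(nr)
 *
 * Writing the AMO result to the zero register discards the old word,
 * while the __test_and_*() forms capture it in __res so the caller can
 * inspect the targeted bit.
 */
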
/* Bitmask modifiers */
#define __NOP(x) (x)
#define __NOT(x) (~(x))

/**
 * arch_test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is an atomic fully-ordered operation (implied full memory barrier).
 */
static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(or, __NOP, nr, addr);
}

/**
 * arch_test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This is an atomic fully-ordered operation (implied full memory barrier).
 */
static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(and, __NOT, nr, addr);
}

/**
 * arch_test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This is an atomic fully-ordered operation (implied full memory barrier).
 */
static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(xor, __NOP, nr, addr);
}

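/*
 * Kernel code normally reaches these operations through the instrumented
 * wrappers included at the bottom of this file, e.g.:
 *
 *	DECLARE_BITMAP(flags, 64);
 *
 *	if (!test_and_set_bit(3, flags))
 *		do_setup();	// bit 3 was clear; we own the 0 -> 1 transition
 */
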
/**
 * arch_set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(or, __NOP, nr, addr);
}

/**
 * arch_clear_bit - Clear a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its ordering guarantees.
 */
static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(and, __NOT, nr, addr);
}

/**
 * arch_change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() may be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(xor, __NOP, nr, addr);
}

/**
 * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics.
 * It can be used to implement bit locks.
 */
static __always_inline int arch_test_and_set_bit_lock(
	unsigned long nr, volatile unsigned long *addr)
{
	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}

/**
 * arch_clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static __always_inline void arch_clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	__op_bit_ord(and, __NOT, nr, addr, .rl);
}

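/*
 * Together these form the acquire/release pair behind bit spinlocks. A
 * minimal sketch using the generic wrappers:
 *
 *	while (test_and_set_bit_lock(0, &word))	// .aq AMO: acquire
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(0, &word);		// .rl AMO: release
 */
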
/**
 * arch___clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock(), however it is not atomic.
 * It does provide release barrier semantics so it can be used to unlock
 * a bit lock, however it would only be used if no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static __always_inline void arch___clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	arch_clear_bit_unlock(nr, addr);
}

static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *addr)
{
	unsigned long res;
	__asm__ __volatile__ (
		__AMO(xor) ".rl %0, %2, %1"
		: "=r" (res), "+A" (*addr)
		: "r" (__NOP(mask))
		: "memory");
	return (res & BIT(7)) != 0;
}

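/*
 * The .rl AMO above returns the pre-xor value in res, so testing BIT(7)
 * reports the sign of the low byte as it was before the unlock. Callers
 * are expected not to include bit 7 in @mask, which keeps the old and
 * new values of that bit identical; the page-cache unlock path, for
 * example, clears the lock bit and tests the waiters bit (both in the
 * low byte) with this single instruction.
 */
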
#undef __test_and_op_bit
#undef __op_bit
#undef __NOP
#undef __NOT
#undef __AMO

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_RISCV_BITOPS_H */