/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                                 INT_MAX    REFCOUNT_SATURATED    UINT_MAX
 *   0                        (0x7fff_ffff)    (0xc000_0000)     (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                      <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *	int old = atomic_fetch_add_relaxed(1, &r->refs);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(&r->refs, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object.
 *
 * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
 * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
 * With the current PID limit, if no batched refcounting operations are used and
 * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
 * operations, this makes it impossible for a saturated refcount to leave the
 * saturation range, even if it is possible for multiple uses of the same
 * refcount to nest in the context of a single task:
 *
 *	(UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
 *	0x40000000 / 0x400000 = 0x100 = 256
 *
 * If hundreds of references are added/removed with a single refcounting
 * operation, it may potentially be possible to leave the saturation range; but
 * given the precise timing details involved with the round-robin scheduling of
 * each thread manipulating the refcount and the need to hit the race multiple
 * times in succession, there doesn't appear to be a practical avenue of attack
 * even if using refcount_add() operations with larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed with respect to regular atomic_t
 * functions and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */
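
/*
 * Illustrative sketch only (not part of this header's API): how the rules
 * above play out on the increment side. The increment itself is relaxed; the
 * lock acquire (or the RCU dependent load) that produced the pointer is what
 * provides the ordering. The names below ('struct foo', foo_lock, foo_list)
 * are hypothetical.
 *
 *	spin_lock(&foo_lock);			// acquire orders the lookup
 *	p = list_first_entry_or_null(&foo_list, struct foo, node);
 *	if (p)
 *		refcount_inc(&p->ref);		// relaxed increment is enough
 *	spin_unlock(&foo_lock);
 */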

#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}
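
/*
 * Illustrative sketch only: a newly allocated object typically starts life
 * with a count of 1, held by its creator. 'struct foo' and foo_alloc() are
 * hypothetical names used for the example.
 *
 *	struct foo *foo_alloc(void)
 *	{
 *		struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *		if (p)
 *			refcount_set(&p->ref, 1);	// creator's reference
 *		return p;
 *	}
 */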

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (oldp)
		*oldp = old;

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}

static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return __refcount_add_not_zero(i, r, NULL);
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void __refcount_add(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

static inline void refcount_add(int i, refcount_t *r)
{
	__refcount_add(i, r, NULL);
}
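
/*
 * Illustrative sketch only: the batched helpers are meant for cases where
 * several references are handed out in one operation, e.g. a hypothetical
 * foo_publish() that stores 'n' pointers to the same object at once.
 *
 *	static void foo_publish(struct foo *p, struct foo **slots, int n)
 *	{
 *		int i;
 *
 *		refcount_add(n, &p->ref);	// one reference per slot
 *		for (i = 0; i < n; i++)
 *			slots[i] = p;
 *	}
 */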

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
	return __refcount_add_not_zero(1, r, oldp);
}

static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return __refcount_inc_not_zero(r, NULL);
}
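
/*
 * Illustrative sketch only: the lockless-lookup pattern this helper exists
 * for. foo_find() is a hypothetical RCU-protected lookup; the object may
 * already be on its way to being freed, so the increment must refuse to
 * resurrect a zero count.
 *
 *	struct foo *foo_get(int key)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		p = foo_find(key);		// RCU-protected lookup
 *		if (p && !refcount_inc_not_zero(&p->ref))
 *			p = NULL;		// already dying, don't touch it
 *		rcu_read_unlock();
 *		return p;
 *	}
 */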

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
	__refcount_add(1, r, oldp);
}

static inline void refcount_inc(refcount_t *r)
{
	__refcount_inc(r, NULL);
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}

static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return __refcount_sub_and_test(i, r, NULL);
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
	return __refcount_sub_and_test(1, r, oldp);
}

static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return __refcount_dec_and_test(r, NULL);
}
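
/*
 * Illustrative sketch only: the usual "put" helper built on top of
 * refcount_dec_and_test(), again with a hypothetical 'struct foo'. The
 * release ordering makes prior stores to the object visible before the
 * counter can reach 0; the acquire on success orders the free after the
 * 1->0 transition.
 *
 *	static void foo_put(struct foo *p)
 *	{
 *		if (refcount_dec_and_test(&p->ref))
 *			kfree(p);
 *	}
 */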

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void __refcount_dec(refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(1, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(old <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}

static inline void refcount_dec(refcount_t *r)
{
	__refcount_dec(r, NULL);
}

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags);
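
/*
 * Illustrative sketch only: refcount_dec_and_lock() takes the lock only when
 * the count is about to drop to 0, so the last reference holder can unlink
 * the object under the lock before freeing it. 'struct foo', foo_lock and
 * the list membership are hypothetical.
 *
 *	static void foo_put_locked(struct foo *p)
 *	{
 *		if (refcount_dec_and_lock(&p->ref, &foo_lock)) {
 *			list_del(&p->node);
 *			spin_unlock(&foo_lock);
 *			kfree(p);
 *		}
 *	}
 */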
#endif /* _LINUX_REFCOUNT_H */