/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RCUREF_H
#define _LINUX_RCUREF_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/limits.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

#define RCUREF_ONEREF		0x00000000U
#define RCUREF_MAXREF		0x7FFFFFFFU
#define RCUREF_SATURATED	0xA0000000U
#define RCUREF_RELEASED		0xC0000000U
#define RCUREF_DEAD		0xE0000000U
#define RCUREF_NOREF		0xFFFFFFFFU

/**
 * rcuref_init - Initialize a rcuref reference count with the given reference count
 * @ref:	Pointer to the reference count
 * @cnt:	The initial reference count, typically '1'
 */
static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
{
	atomic_set(&ref->refcnt, cnt - 1);
}

/**
 * rcuref_read - Read the number of held reference counts of a rcuref
 * @ref:	Pointer to the reference count
 *
 * Return: The number of held references (0 ... N). The value 0 does not
 * indicate that it is safe to schedule the object, protected by this reference
 * counter, for deconstruction.
 * If you want to know if the reference counter has been marked DEAD (as
 * signaled by rcuref_put()) please use rcuref_is_dead().
 */
static inline unsigned int rcuref_read(rcuref_t *ref)
{
	unsigned int c = atomic_read(&ref->refcnt);

	/* Return 0 if within the DEAD zone. */
	return c >= RCUREF_RELEASED ? 0 : c + 1;
}

/**
 * rcuref_is_dead - Check if the rcuref has already been marked dead
 * @ref:	Pointer to the reference count
 *
 * Return: True if the object has been marked DEAD. This signals that a previous
 * invocation of rcuref_put() returned true on this reference counter, meaning
 * the protected object can safely be scheduled for deconstruction.
 * Otherwise, returns false.
 */
static inline bool rcuref_is_dead(rcuref_t *ref)
{
	unsigned int c = atomic_read(&ref->refcnt);

	return (c >= RCUREF_RELEASED) && (c < RCUREF_NOREF);
}

extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);

/**
 * rcuref_get - Acquire one reference on a rcuref reference count
 * @ref:	Pointer to the reference count
 *
 * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See documentation in lib/rcuref.c
 *
 * Return:
 *	False if the attempt to acquire a reference failed. This happens
 *	when the last reference has been put already.
 *
 *	True if a reference was successfully acquired.
 */
static inline __must_check bool rcuref_get(rcuref_t *ref)
{
	/*
	 * Unconditionally increase the reference count. The saturation and
	 * dead zones provide enough tolerance for this.
	 */
	if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
		return true;

	/* Handle the cases inside the saturation and dead zones */
	return rcuref_get_slowpath(ref);
}

extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);

/*
 * Internal helper. Do not invoke directly.
 */
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
	int cnt;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
			 "suspicious rcuref_put_rcusafe() usage");
	/*
	 * Unconditionally decrease the reference count. The saturation and
	 * dead zones provide enough tolerance for this.
	 */
	cnt = atomic_sub_return_release(1, &ref->refcnt);
	if (likely(cnt >= 0))
		return false;

	/*
	 * Handle the last reference drop and cases inside the saturation
	 * and dead zones.
	 */
	return rcuref_put_slowpath(ref, cnt);
}

/**
 * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe
 * @ref:	Pointer to the reference count
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Can be invoked from contexts which guarantee that no grace period can
 * happen which would free the object concurrently if the decrement drops
 * the last reference and the slowpath races against a concurrent get() and
 * put() pair. rcu_read_lock()'ed and atomic contexts qualify.
 *
 * Return:
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely release the
 *	object which is protected by the reference counter.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	release the protected object.
 */
static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref)
{
	return __rcuref_put(ref);
}

/**
 * rcuref_put -- Release one reference for a rcuref reference count
 * @ref:	Pointer to the reference count
 *
 * Can be invoked from any context.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return:
 *
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely schedule the
 *	object, which is protected by the reference counter, for
 *	deconstruction.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	deconstruct the protected object.
 */
static inline __must_check bool rcuref_put(rcuref_t *ref)
{
	bool released;

	preempt_disable();
	released = __rcuref_put(ref);
	preempt_enable();
	return released;
}

#endif
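
/*
 * Usage sketch (illustration only, not part of this header): a minimal
 * consumer of the rcuref API, assuming a hypothetical struct gadget that
 * is freed via an RCU grace period once the last reference is dropped.
 * All gadget_* names are invented for the example; rcuref_*() and
 * kfree_rcu() are the real kernel interfaces.
 */
#include <linux/rcuref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct gadget {
	rcuref_t	ref;
	struct rcu_head	rcu;
};

static void gadget_init(struct gadget *g)
{
	/* Start with a single reference held by the creator. */
	rcuref_init(&g->ref, 1);
}

static bool gadget_get(struct gadget *g)
{
	/*
	 * The caller must keep *g stable across this acquire, e.g. by
	 * holding rcu_read_lock() around the lookup and the get.
	 * Fails once the last reference has already been put.
	 */
	return rcuref_get(&g->ref);
}

static void gadget_put(struct gadget *g)
{
	/*
	 * Only the final drop returns true; defer the free until after
	 * a grace period so concurrent RCU readers stay safe.
	 */
	if (rcuref_put(&g->ref))
		kfree_rcu(g, rcu);
}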