Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/export.h>
3#include <linux/lockref.h>
4
#if USE_CMPXCHG_LOCKREF

/*
 * Lockless fast path: attempt to update the lock word and the count in
 * one 64-bit cmpxchg (BUILD_BUG_ON enforces that struct lockref fits in
 * 8 bytes).  The loop only runs while the embedded spinlock is observed
 * unlocked; CODE mutates "new" and may return/break early, SUCCESS runs
 * after a successful cmpxchg.  After 100 failed attempts, or once the
 * lock is seen held, control falls through to the caller's spinlock
 * slow path.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old;					\
		CODE								\
		if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,		\
						 &old.lock_count,		\
						 new.lock_count))) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
	}									\
} while (0)

#else

/* No 64-bit cmpxchg available: every operation takes the spinlock path. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
34
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: lockless increment via 64-bit cmpxchg. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was held or the cmpxchg kept losing races. */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
55
56/**
57 * lockref_get_not_zero - Increments count unless the count is 0 or dead
58 * @lockref: pointer to lockref structure
59 * Return: 1 if count updated successfully or 0 if count was zero
60 */
61bool lockref_get_not_zero(struct lockref *lockref)
62{
63 bool retval = false;
64
65 CMPXCHG_LOOP(
66 new.count++;
67 if (old.count <= 0)
68 return false;
69 ,
70 return true;
71 );
72
73 spin_lock(&lockref->lock);
74 if (lockref->count > 0) {
75 lockref->count++;
76 retval = true;
77 }
78 spin_unlock(&lockref->lock);
79 return retval;
80}
81EXPORT_SYMBOL(lockref_get_not_zero);
82
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return -1.
 */
int lockref_put_return(struct lockref *lockref)
{
	/*
	 * Fast path only: there is no spinlock fallback here.  If the
	 * lock is held, the cmpxchg keeps failing, or (without
	 * USE_CMPXCHG_LOCKREF) the loop compiles to nothing, we simply
	 * report failure with -1 and let the caller take the lock.
	 */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
102
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
/* NOTE(review): #undef presumably shadows a macro wrapper declared in the
 * header so the real out-of-line function gets defined — confirm there. */
#undef lockref_put_or_lock
bool lockref_put_or_lock(struct lockref *lockref)
{
	/* Fast path: lockless decrement, bail out to slow path at <= 1. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return true;
	);

	spin_lock(&lockref->lock);
	/*
	 * Deliberate: on the false return the spinlock stays HELD (see
	 * kernel-doc above — "lock taken").  The caller owns the lock
	 * and must drop it after handling the last reference.
	 */
	if (lockref->count <= 1)
		return false;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return true;
}
EXPORT_SYMBOL(lockref_put_or_lock);
127
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold @lockref->lock (enforced by assert_spin_locked).
 * The count is set to a large negative value so the "dead" state is
 * seen as negative by the lockless fast paths (e.g. the old.count < 0
 * check in lockref_get_not_dead).
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
138
139/**
140 * lockref_get_not_dead - Increments count unless the ref is dead
141 * @lockref: pointer to lockref structure
142 * Return: 1 if count updated successfully or 0 if lockref was dead
143 */
144bool lockref_get_not_dead(struct lockref *lockref)
145{
146 bool retval = false;
147
148 CMPXCHG_LOOP(
149 new.count++;
150 if (old.count < 0)
151 return false;
152 ,
153 return true;
154 );
155
156 spin_lock(&lockref->lock);
157 if (lockref->count >= 0) {
158 lockref->count++;
159 retval = true;
160 }
161 spin_unlock(&lockref->lock);
162 return retval;
163}
164EXPORT_SYMBOL(lockref_get_not_dead);