
rqspinlock: Add macros for rqspinlock usage

Introduce helper macros that wrap the rqspinlock slow path and provide an
interface analogous to the raw_spin_lock API. Note that on error, preemption
and IRQ disabling are automatically unwound before the error is returned to
the caller.
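
For example, a caller only needs to check the return value; on failure there
is nothing for it to undo. The structure and function below are illustrative,
not part of this patch:

	struct my_elem {				/* hypothetical data structure */
		rqspinlock_t lock;
		u64 count;
	};

	static int my_elem_inc(struct my_elem *elem)	/* hypothetical caller */
	{
		int ret;

		ret = raw_res_spin_lock(&elem->lock);	/* disables preemption on success */
		if (ret)
			return ret;	/* -EDEADLK or -ETIMEDOUT; preemption already re-enabled */

		elem->count++;				/* critical section */

		raw_res_spin_unlock(&elem->lock);	/* unlocks and re-enables preemption */
		return 0;
	}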

Ensure that in the absence of CONFIG_QUEUED_SPINLOCKS support, we fall back
to the test-and-set implementation.
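
For illustration, the fallback has roughly the shape of a bounded test-and-set
loop. The sketch below is not the in-tree resilient_tas_spin_lock(); the spin
bound is a crude stand-in for the real timeout and deadlock checks, and it
assumes rqspinlock_t exposes an atomic 'val' field as in the queued case:

	/* Illustrative sketch only, not the actual fallback implementation. */
	static __always_inline int res_tas_spin_lock_sketch(rqspinlock_t *lock)
	{
		int val, spins = 0;

		grab_held_lock_entry(lock);	/* record the lock for deadlock detection */
		for (;;) {
			val = 0;
			if (atomic_try_cmpxchg_acquire(&lock->val, &val, 1))
				return 0;	/* acquired */
			if (++spins > 1000000) {	/* crude stand-in for a timeout check */
				release_held_lock_entry(lock);
				return -ETIMEDOUT;
			}
			cpu_relax();
		}
	}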

Add some comments describing the subtle memory ordering logic during
unlock, and why it's safe.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-17-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>


include/asm-generic/rqspinlock.h (+87 lines)
 	this_cpu_dec(rqspinlock_held_locks.cnt);
 }
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+/**
+ * res_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * Return:
+ * * 0 - Lock was acquired successfully.
+ * * -EDEADLK - Lock acquisition failed because of AA/ABBA deadlock.
+ * * -ETIMEDOUT - Lock acquisition failed because of timeout.
+ */
+static __always_inline int res_spin_lock(rqspinlock_t *lock)
+{
+	int val = 0;
+
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))) {
+		grab_held_lock_entry(lock);
+		return 0;
+	}
+	return resilient_queued_spin_lock_slowpath(lock, val);
+}
+
+#else
+
+#define res_spin_lock(lock) resilient_tas_spin_lock(lock)
+
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+static __always_inline void res_spin_unlock(rqspinlock_t *lock)
+{
+	struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
+
+	if (unlikely(rqh->cnt > RES_NR_HELD))
+		goto unlock;
+	WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
+unlock:
+	/*
+	 * Release barrier, ensures correct ordering. See release_held_lock_entry
+	 * for details. Perform release store instead of queued_spin_unlock,
+	 * since we use this function for test-and-set fallback as well. When we
+	 * have CONFIG_QUEUED_SPINLOCKS=n, we clear the full 4-byte lockword.
+	 *
+	 * Like release_held_lock_entry, we can do the release before the dec.
+	 * We simply care about not seeing the 'lock' in our table from a remote
+	 * CPU once the lock has been released, which doesn't rely on the dec.
+	 *
+	 * Unlike smp_wmb(), release is not a two way fence, hence it is
+	 * possible for a inc to move up and reorder with our clearing of the
+	 * entry. This isn't a problem however, as for a misdiagnosis of ABBA,
+	 * the remote CPU needs to hold this lock, which won't be released until
+	 * the store below is done, which would ensure the entry is overwritten
+	 * to NULL, etc.
+	 */
+	smp_store_release(&lock->locked, 0);
+	this_cpu_dec(rqspinlock_held_locks.cnt);
+}
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; })
+#else
+#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
+#endif
+
+#define raw_res_spin_lock(lock)						\
+	({								\
+		int __ret;						\
+		preempt_disable();					\
+		__ret = res_spin_lock(lock);				\
+		if (__ret)						\
+			preempt_enable();				\
+		__ret;							\
+	})
+
+#define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
+
+#define raw_res_spin_lock_irqsave(lock, flags)				\
+	({								\
+		int __ret;						\
+		local_irq_save(flags);					\
+		__ret = raw_res_spin_lock(lock);			\
+		if (__ret)						\
+			local_irq_restore(flags);			\
+		__ret;							\
+	})
+
+#define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
+
 #endif /* __ASM_GENERIC_RQSPINLOCK_H */
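
As a rough usage sketch of the new macros (the bucket structure and helpers
below are hypothetical, not part of this patch), a lock is initialized with
raw_res_spin_lock_init() and the irqsave variants follow the same
error-returning pattern:

	struct bucket {				/* hypothetical user of the API */
		rqspinlock_t lock;
		struct list_head head;
	};

	static void bucket_init(struct bucket *b)
	{
		raw_res_spin_lock_init(&b->lock);
		INIT_LIST_HEAD(&b->head);
	}

	static int bucket_add(struct bucket *b, struct list_head *node)
	{
		unsigned long flags;
		int ret;

		ret = raw_res_spin_lock_irqsave(&b->lock, flags);
		if (ret)		/* IRQs and preemption already restored */
			return ret;
		list_add(node, &b->head);
		raw_res_spin_unlock_irqrestore(&b->lock, flags);
		return 0;
	}

Because the irqsave variant restores the saved IRQ state itself on failure,
callers never end up with IRQs disabled after a failed acquisition.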