/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here is the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * These must be defined before including other files; the inline functions
 * below need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

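/*
 * Illustrative example (not part of the API; 'struct my_ctrl' and
 * my_ctrl_init() are hypothetical): raw_spin_lock_init() is the run-time
 * initializer for an embedded or dynamically allocated raw_spinlock_t,
 * while a statically allocated lock would typically use
 * DEFINE_RAW_SPINLOCK() from linux/spinlock_types.h:
 *
 *	struct my_ctrl {
 *		raw_spinlock_t	lock;
 *		unsigned long	state;
 *	};
 *
 *	static void my_ctrl_init(struct my_ctrl *c)
 *	{
 *		raw_spin_lock_init(&c->lock);
 *		c->state = 0;
 *	}
 */
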
/*
 * This barrier must provide two things:
 *
 *   - it must guarantee that a STORE before the spin_lock() is ordered
 *     against a LOAD after it; see the comments at its two usage sites.
 *
 *   - it must ensure the critical section is RCsc.
 *
 * The latter is important for cases where we observe values written by other
 * CPUs in spin-loops, without barriers, while being subject to scheduling.
 *
 *	CPU0			CPU1			CPU2
 *
 *				for (;;) {
 *				  if (READ_ONCE(X))
 *				    break;
 *				}
 * X=1
 *				<sched-out>
 *						<sched-in>
 *						r = X;
 *
 * Without transitivity it could happen that CPU1 observes X!=0 and breaks
 * out of the loop, we get migrated, and CPU2 still sees X==0.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly, all our TSO
 * architectures imply an smp_mb() for each atomic instruction, so they don't
 * need more either.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif

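/*
 * Illustrative example (X, Y and 'lock' are hypothetical): the first
 * guarantee above means that in
 *
 *	WRITE_ONCE(X, 1);
 *	spin_lock(&lock);
 *	smp_mb__after_spinlock();
 *	r = READ_ONCE(Y);
 *
 * the store to X is ordered before the load from Y, which the ACQUIRE
 * ordering of spin_lock() alone would not guarantee.
 */
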
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock)	__acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note that we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set.  The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

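/*
 * Illustrative example ('my_lock' and the critical section are
 * hypothetical): the canonical pattern for the irqsave variants.  Note
 * that 'flags' is an unsigned long passed by name (the macro assigns to
 * it), not by pointer:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&my_lock, flags);
 *	... critical section, hardirqs disabled on the local CPU ...
 *	raw_spin_unlock_irqrestore(&my_lock, flags);
 */
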
/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

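/*
 * Illustrative example ('my_list_lock', 'my_list' and my_add() are
 * hypothetical; the list helpers come from linux/list.h): typical use of
 * the spinlock_t wrappers above for data that is also touched from
 * interrupt context:
 *
 *	static DEFINE_SPINLOCK(my_list_lock);
 *	static LIST_HEAD(my_list);
 *
 *	void my_add(struct my_item *item)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&my_list_lock, flags);
 *		list_add_tail(&item->node, &my_list);
 *		spin_unlock_irqrestore(&my_list_lock, flags);
 *	}
 */
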
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs the above definitions)
 */
#include <linux/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false in all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

#endif /* __LINUX_SPINLOCK_H */
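
/*
 * Illustrative example ('struct my_obj', 'my_table_lock' and my_obj_put()
 * are hypothetical): atomic_dec_and_lock() is typically used to drop the
 * last reference while holding the lock that protects the lookup
 * structure:
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_lock(&obj->refcount, &my_table_lock))
 *			return;
 *		hlist_del(&obj->hash_node);
 *		spin_unlock(&my_table_lock);
 *		kfree(obj);
 *	}
 */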