Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mutex: Move ww_mutex definitions to ww_mutex.h

Move the definitions for wound/wait mutexes out to a separate
header, ww_mutex.h. This reduces clutter in mutex.h, and
increases readability.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Dave Airlie <airlied@gmail.com>
Link: http://lkml.kernel.org/r/51D675DC.3000907@canonical.com
[ Tidied up the code a bit. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Maarten Lankhorst and committed by Ingo Molnar
1b375dc3 2e17c5a9

+381 -359
-358
include/linux/mutex.h
··· 78 78 #endif 79 79 }; 80 80 81 - struct ww_class { 82 - atomic_long_t stamp; 83 - struct lock_class_key acquire_key; 84 - struct lock_class_key mutex_key; 85 - const char *acquire_name; 86 - const char *mutex_name; 87 - }; 88 - 89 - struct ww_acquire_ctx { 90 - struct task_struct *task; 91 - unsigned long stamp; 92 - unsigned acquired; 93 - #ifdef CONFIG_DEBUG_MUTEXES 94 - unsigned done_acquire; 95 - struct ww_class *ww_class; 96 - struct ww_mutex *contending_lock; 97 - #endif 98 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 99 - struct lockdep_map dep_map; 100 - #endif 101 - #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 102 - unsigned deadlock_inject_interval; 103 - unsigned deadlock_inject_countdown; 104 - #endif 105 - }; 106 - 107 - struct ww_mutex { 108 - struct mutex base; 109 - struct ww_acquire_ctx *ctx; 110 - #ifdef CONFIG_DEBUG_MUTEXES 111 - struct ww_class *ww_class; 112 - #endif 113 - }; 114 - 115 81 #ifdef CONFIG_DEBUG_MUTEXES 116 82 # include <linux/mutex-debug.h> 117 83 #else ··· 102 136 #ifdef CONFIG_DEBUG_LOCK_ALLOC 103 137 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ 104 138 , .dep_map = { .name = #lockname } 105 - # define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \ 106 - , .ww_class = &ww_class 107 139 #else 108 140 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) 109 - # define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) 110 141 #endif 111 142 112 143 #define __MUTEX_INITIALIZER(lockname) \ ··· 113 150 __DEBUG_MUTEX_INITIALIZER(lockname) \ 114 151 __DEP_MAP_MUTEX_INITIALIZER(lockname) } 115 152 116 - #define __WW_CLASS_INITIALIZER(ww_class) \ 117 - { .stamp = ATOMIC_LONG_INIT(0) \ 118 - , .acquire_name = #ww_class "_acquire" \ 119 - , .mutex_name = #ww_class "_mutex" } 120 - 121 - #define __WW_MUTEX_INITIALIZER(lockname, class) \ 122 - { .base = { \__MUTEX_INITIALIZER(lockname) } \ 123 - __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } 124 - 125 153 #define DEFINE_MUTEX(mutexname) \ 126 154 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) 127 
155 128 - #define DEFINE_WW_CLASS(classname) \ 129 - struct ww_class classname = __WW_CLASS_INITIALIZER(classname) 130 - 131 - #define DEFINE_WW_MUTEX(mutexname, ww_class) \ 132 - struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) 133 - 134 - 135 156 extern void __mutex_init(struct mutex *lock, const char *name, 136 157 struct lock_class_key *key); 137 - 138 - /** 139 - * ww_mutex_init - initialize the w/w mutex 140 - * @lock: the mutex to be initialized 141 - * @ww_class: the w/w class the mutex should belong to 142 - * 143 - * Initialize the w/w mutex to unlocked state and associate it with the given 144 - * class. 145 - * 146 - * It is not allowed to initialize an already locked mutex. 147 - */ 148 - static inline void ww_mutex_init(struct ww_mutex *lock, 149 - struct ww_class *ww_class) 150 - { 151 - __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); 152 - lock->ctx = NULL; 153 - #ifdef CONFIG_DEBUG_MUTEXES 154 - lock->ww_class = ww_class; 155 - #endif 156 - } 157 158 158 159 /** 159 160 * mutex_is_locked - is the mutex locked ··· 172 245 */ 173 246 extern int mutex_trylock(struct mutex *lock); 174 247 extern void mutex_unlock(struct mutex *lock); 175 - 176 - /** 177 - * ww_acquire_init - initialize a w/w acquire context 178 - * @ctx: w/w acquire context to initialize 179 - * @ww_class: w/w class of the context 180 - * 181 - * Initializes an context to acquire multiple mutexes of the given w/w class. 182 - * 183 - * Context-based w/w mutex acquiring can be done in any order whatsoever within 184 - * a given lock class. Deadlocks will be detected and handled with the 185 - * wait/wound logic. 186 - * 187 - * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can 188 - * result in undetected deadlocks and is so forbidden. Mixing different contexts 189 - * for the same w/w class when acquiring mutexes can also result in undetected 190 - * deadlocks, and is hence also forbidden. 
Both types of abuse will be caught by 191 - * enabling CONFIG_PROVE_LOCKING. 192 - * 193 - * Nesting of acquire contexts for _different_ w/w classes is possible, subject 194 - * to the usual locking rules between different lock classes. 195 - * 196 - * An acquire context must be released with ww_acquire_fini by the same task 197 - * before the memory is freed. It is recommended to allocate the context itself 198 - * on the stack. 199 - */ 200 - static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, 201 - struct ww_class *ww_class) 202 - { 203 - ctx->task = current; 204 - ctx->stamp = atomic_long_inc_return(&ww_class->stamp); 205 - ctx->acquired = 0; 206 - #ifdef CONFIG_DEBUG_MUTEXES 207 - ctx->ww_class = ww_class; 208 - ctx->done_acquire = 0; 209 - ctx->contending_lock = NULL; 210 - #endif 211 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 212 - debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); 213 - lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, 214 - &ww_class->acquire_key, 0); 215 - mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); 216 - #endif 217 - #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 218 - ctx->deadlock_inject_interval = 1; 219 - ctx->deadlock_inject_countdown = ctx->stamp & 0xf; 220 - #endif 221 - } 222 - 223 - /** 224 - * ww_acquire_done - marks the end of the acquire phase 225 - * @ctx: the acquire context 226 - * 227 - * Marks the end of the acquire phase, any further w/w mutex lock calls using 228 - * this context are forbidden. 229 - * 230 - * Calling this function is optional, it is just useful to document w/w mutex 231 - * code and clearly designated the acquire phase from actually using the locked 232 - * data structures. 
233 - */ 234 - static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) 235 - { 236 - #ifdef CONFIG_DEBUG_MUTEXES 237 - lockdep_assert_held(ctx); 238 - 239 - DEBUG_LOCKS_WARN_ON(ctx->done_acquire); 240 - ctx->done_acquire = 1; 241 - #endif 242 - } 243 - 244 - /** 245 - * ww_acquire_fini - releases a w/w acquire context 246 - * @ctx: the acquire context to free 247 - * 248 - * Releases a w/w acquire context. This must be called _after_ all acquired w/w 249 - * mutexes have been released with ww_mutex_unlock. 250 - */ 251 - static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) 252 - { 253 - #ifdef CONFIG_DEBUG_MUTEXES 254 - mutex_release(&ctx->dep_map, 0, _THIS_IP_); 255 - 256 - DEBUG_LOCKS_WARN_ON(ctx->acquired); 257 - if (!config_enabled(CONFIG_PROVE_LOCKING)) 258 - /* 259 - * lockdep will normally handle this, 260 - * but fail without anyway 261 - */ 262 - ctx->done_acquire = 1; 263 - 264 - if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC)) 265 - /* ensure ww_acquire_fini will still fail if called twice */ 266 - ctx->acquired = ~0U; 267 - #endif 268 - } 269 - 270 - extern int __must_check __ww_mutex_lock(struct ww_mutex *lock, 271 - struct ww_acquire_ctx *ctx); 272 - extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock, 273 - struct ww_acquire_ctx *ctx); 274 - 275 - /** 276 - * ww_mutex_lock - acquire the w/w mutex 277 - * @lock: the mutex to be acquired 278 - * @ctx: w/w acquire context, or NULL to acquire only a single lock. 279 - * 280 - * Lock the w/w mutex exclusively for this task. 281 - * 282 - * Deadlocks within a given w/w class of locks are detected and handled with the 283 - * wait/wound algorithm. If the lock isn't immediately avaiable this function 284 - * will either sleep until it is (wait case). Or it selects the current context 285 - * for backing off by returning -EDEADLK (wound case). Trying to acquire the 286 - * same lock with the same context twice is also detected and signalled by 287 - * returning -EALREADY. 
Returns 0 if the mutex was successfully acquired. 288 - * 289 - * In the wound case the caller must release all currently held w/w mutexes for 290 - * the given context and then wait for this contending lock to be available by 291 - * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this 292 - * lock and proceed with trying to acquire further w/w mutexes (e.g. when 293 - * scanning through lru lists trying to free resources). 294 - * 295 - * The mutex must later on be released by the same task that 296 - * acquired it. The task may not exit without first unlocking the mutex. Also, 297 - * kernel memory where the mutex resides must not be freed with the mutex still 298 - * locked. The mutex must first be initialized (or statically defined) before it 299 - * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be 300 - * of the same w/w lock class as was used to initialize the acquire context. 301 - * 302 - * A mutex acquired with this function must be released with ww_mutex_unlock. 303 - */ 304 - static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 305 - { 306 - if (ctx) 307 - return __ww_mutex_lock(lock, ctx); 308 - else { 309 - mutex_lock(&lock->base); 310 - return 0; 311 - } 312 - } 313 - 314 - /** 315 - * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible 316 - * @lock: the mutex to be acquired 317 - * @ctx: w/w acquire context 318 - * 319 - * Lock the w/w mutex exclusively for this task. 320 - * 321 - * Deadlocks within a given w/w class of locks are detected and handled with the 322 - * wait/wound algorithm. If the lock isn't immediately avaiable this function 323 - * will either sleep until it is (wait case). Or it selects the current context 324 - * for backing off by returning -EDEADLK (wound case). Trying to acquire the 325 - * same lock with the same context twice is also detected and signalled by 326 - * returning -EALREADY. 
Returns 0 if the mutex was successfully acquired. If a 327 - * signal arrives while waiting for the lock then this function returns -EINTR. 328 - * 329 - * In the wound case the caller must release all currently held w/w mutexes for 330 - * the given context and then wait for this contending lock to be available by 331 - * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to 332 - * not acquire this lock and proceed with trying to acquire further w/w mutexes 333 - * (e.g. when scanning through lru lists trying to free resources). 334 - * 335 - * The mutex must later on be released by the same task that 336 - * acquired it. The task may not exit without first unlocking the mutex. Also, 337 - * kernel memory where the mutex resides must not be freed with the mutex still 338 - * locked. The mutex must first be initialized (or statically defined) before it 339 - * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be 340 - * of the same w/w lock class as was used to initialize the acquire context. 341 - * 342 - * A mutex acquired with this function must be released with ww_mutex_unlock. 343 - */ 344 - static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, 345 - struct ww_acquire_ctx *ctx) 346 - { 347 - if (ctx) 348 - return __ww_mutex_lock_interruptible(lock, ctx); 349 - else 350 - return mutex_lock_interruptible(&lock->base); 351 - } 352 - 353 - /** 354 - * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex 355 - * @lock: the mutex to be acquired 356 - * @ctx: w/w acquire context 357 - * 358 - * Acquires a w/w mutex with the given context after a wound case. This function 359 - * will sleep until the lock becomes available. 360 - * 361 - * The caller must have released all w/w mutexes already acquired with the 362 - * context and then call this function on the contended lock. 
363 - * 364 - * Afterwards the caller may continue to (re)acquire the other w/w mutexes it 365 - * needs with ww_mutex_lock. Note that the -EALREADY return code from 366 - * ww_mutex_lock can be used to avoid locking this contended mutex twice. 367 - * 368 - * It is forbidden to call this function with any other w/w mutexes associated 369 - * with the context held. It is forbidden to call this on anything else than the 370 - * contending mutex. 371 - * 372 - * Note that the slowpath lock acquiring can also be done by calling 373 - * ww_mutex_lock directly. This function here is simply to help w/w mutex 374 - * locking code readability by clearly denoting the slowpath. 375 - */ 376 - static inline void 377 - ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 378 - { 379 - int ret; 380 - #ifdef CONFIG_DEBUG_MUTEXES 381 - DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); 382 - #endif 383 - ret = ww_mutex_lock(lock, ctx); 384 - (void)ret; 385 - } 386 - 387 - /** 388 - * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, 389 - * interruptible 390 - * @lock: the mutex to be acquired 391 - * @ctx: w/w acquire context 392 - * 393 - * Acquires a w/w mutex with the given context after a wound case. This function 394 - * will sleep until the lock becomes available and returns 0 when the lock has 395 - * been acquired. If a signal arrives while waiting for the lock then this 396 - * function returns -EINTR. 397 - * 398 - * The caller must have released all w/w mutexes already acquired with the 399 - * context and then call this function on the contended lock. 400 - * 401 - * Afterwards the caller may continue to (re)acquire the other w/w mutexes it 402 - * needs with ww_mutex_lock. Note that the -EALREADY return code from 403 - * ww_mutex_lock can be used to avoid locking this contended mutex twice. 404 - * 405 - * It is forbidden to call this function with any other w/w mutexes associated 406 - * with the given context held. 
It is forbidden to call this on anything else 407 - * than the contending mutex. 408 - * 409 - * Note that the slowpath lock acquiring can also be done by calling 410 - * ww_mutex_lock_interruptible directly. This function here is simply to help 411 - * w/w mutex locking code readability by clearly denoting the slowpath. 412 - */ 413 - static inline int __must_check 414 - ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, 415 - struct ww_acquire_ctx *ctx) 416 - { 417 - #ifdef CONFIG_DEBUG_MUTEXES 418 - DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); 419 - #endif 420 - return ww_mutex_lock_interruptible(lock, ctx); 421 - } 422 - 423 - extern void ww_mutex_unlock(struct ww_mutex *lock); 424 - 425 - /** 426 - * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context 427 - * @lock: mutex to lock 428 - * 429 - * Trylocks a mutex without acquire context, so no deadlock detection is 430 - * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. 431 - */ 432 - static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) 433 - { 434 - return mutex_trylock(&lock->base); 435 - } 436 - 437 - /*** 438 - * ww_mutex_destroy - mark a w/w mutex unusable 439 - * @lock: the mutex to be destroyed 440 - * 441 - * This function marks the mutex uninitialized, and any subsequent 442 - * use of the mutex is forbidden. The mutex must not be locked when 443 - * this function is called. 444 - */ 445 - static inline void ww_mutex_destroy(struct ww_mutex *lock) 446 - { 447 - mutex_destroy(&lock->base); 448 - } 449 - 450 - /** 451 - * ww_mutex_is_locked - is the w/w mutex locked 452 - * @lock: the mutex to be queried 453 - * 454 - * Returns 1 if the mutex is locked, 0 if unlocked. 455 - */ 456 - static inline bool ww_mutex_is_locked(struct ww_mutex *lock) 457 - { 458 - return mutex_is_locked(&lock->base); 459 - } 460 248 461 249 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); 462 250
+1 -1
include/linux/reservation.h
··· 39 39 #ifndef _LINUX_RESERVATION_H 40 40 #define _LINUX_RESERVATION_H 41 41 42 - #include <linux/mutex.h> 42 + #include <linux/ww_mutex.h> 43 43 44 44 extern struct ww_class reservation_ww_class; 45 45
+378
include/linux/ww_mutex.h
··· 1 + /* 2 + * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance 3 + * 4 + * Original mutex implementation started by Ingo Molnar: 5 + * 6 + * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 7 + * 8 + * Wound/wait implementation: 9 + * Copyright (C) 2013 Canonical Ltd. 10 + * 11 + * This file contains the main data structure and API definitions. 12 + */ 13 + 14 + #ifndef __LINUX_WW_MUTEX_H 15 + #define __LINUX_WW_MUTEX_H 16 + 17 + #include <linux/mutex.h> 18 + 19 + struct ww_class { 20 + atomic_long_t stamp; 21 + struct lock_class_key acquire_key; 22 + struct lock_class_key mutex_key; 23 + const char *acquire_name; 24 + const char *mutex_name; 25 + }; 26 + 27 + struct ww_acquire_ctx { 28 + struct task_struct *task; 29 + unsigned long stamp; 30 + unsigned acquired; 31 + #ifdef CONFIG_DEBUG_MUTEXES 32 + unsigned done_acquire; 33 + struct ww_class *ww_class; 34 + struct ww_mutex *contending_lock; 35 + #endif 36 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 37 + struct lockdep_map dep_map; 38 + #endif 39 + #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 40 + unsigned deadlock_inject_interval; 41 + unsigned deadlock_inject_countdown; 42 + #endif 43 + }; 44 + 45 + struct ww_mutex { 46 + struct mutex base; 47 + struct ww_acquire_ctx *ctx; 48 + #ifdef CONFIG_DEBUG_MUTEXES 49 + struct ww_class *ww_class; 50 + #endif 51 + }; 52 + 53 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 54 + # define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \ 55 + , .ww_class = &ww_class 56 + #else 57 + # define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) 58 + #endif 59 + 60 + #define __WW_CLASS_INITIALIZER(ww_class) \ 61 + { .stamp = ATOMIC_LONG_INIT(0) \ 62 + , .acquire_name = #ww_class "_acquire" \ 63 + , .mutex_name = #ww_class "_mutex" } 64 + 65 + #define __WW_MUTEX_INITIALIZER(lockname, class) \ 66 + { .base = { \__MUTEX_INITIALIZER(lockname) } \ 67 + __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } 68 + 69 + #define DEFINE_WW_CLASS(classname) \ 70 + struct 
ww_class classname = __WW_CLASS_INITIALIZER(classname) 71 + 72 + #define DEFINE_WW_MUTEX(mutexname, ww_class) \ 73 + struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) 74 + 75 + /** 76 + * ww_mutex_init - initialize the w/w mutex 77 + * @lock: the mutex to be initialized 78 + * @ww_class: the w/w class the mutex should belong to 79 + * 80 + * Initialize the w/w mutex to unlocked state and associate it with the given 81 + * class. 82 + * 83 + * It is not allowed to initialize an already locked mutex. 84 + */ 85 + static inline void ww_mutex_init(struct ww_mutex *lock, 86 + struct ww_class *ww_class) 87 + { 88 + __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); 89 + lock->ctx = NULL; 90 + #ifdef CONFIG_DEBUG_MUTEXES 91 + lock->ww_class = ww_class; 92 + #endif 93 + } 94 + 95 + /** 96 + * ww_acquire_init - initialize a w/w acquire context 97 + * @ctx: w/w acquire context to initialize 98 + * @ww_class: w/w class of the context 99 + * 100 + * Initializes an context to acquire multiple mutexes of the given w/w class. 101 + * 102 + * Context-based w/w mutex acquiring can be done in any order whatsoever within 103 + * a given lock class. Deadlocks will be detected and handled with the 104 + * wait/wound logic. 105 + * 106 + * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can 107 + * result in undetected deadlocks and is so forbidden. Mixing different contexts 108 + * for the same w/w class when acquiring mutexes can also result in undetected 109 + * deadlocks, and is hence also forbidden. Both types of abuse will be caught by 110 + * enabling CONFIG_PROVE_LOCKING. 111 + * 112 + * Nesting of acquire contexts for _different_ w/w classes is possible, subject 113 + * to the usual locking rules between different lock classes. 114 + * 115 + * An acquire context must be released with ww_acquire_fini by the same task 116 + * before the memory is freed. 
It is recommended to allocate the context itself 117 + * on the stack. 118 + */ 119 + static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, 120 + struct ww_class *ww_class) 121 + { 122 + ctx->task = current; 123 + ctx->stamp = atomic_long_inc_return(&ww_class->stamp); 124 + ctx->acquired = 0; 125 + #ifdef CONFIG_DEBUG_MUTEXES 126 + ctx->ww_class = ww_class; 127 + ctx->done_acquire = 0; 128 + ctx->contending_lock = NULL; 129 + #endif 130 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 131 + debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); 132 + lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, 133 + &ww_class->acquire_key, 0); 134 + mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); 135 + #endif 136 + #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 137 + ctx->deadlock_inject_interval = 1; 138 + ctx->deadlock_inject_countdown = ctx->stamp & 0xf; 139 + #endif 140 + } 141 + 142 + /** 143 + * ww_acquire_done - marks the end of the acquire phase 144 + * @ctx: the acquire context 145 + * 146 + * Marks the end of the acquire phase, any further w/w mutex lock calls using 147 + * this context are forbidden. 148 + * 149 + * Calling this function is optional, it is just useful to document w/w mutex 150 + * code and clearly designated the acquire phase from actually using the locked 151 + * data structures. 152 + */ 153 + static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) 154 + { 155 + #ifdef CONFIG_DEBUG_MUTEXES 156 + lockdep_assert_held(ctx); 157 + 158 + DEBUG_LOCKS_WARN_ON(ctx->done_acquire); 159 + ctx->done_acquire = 1; 160 + #endif 161 + } 162 + 163 + /** 164 + * ww_acquire_fini - releases a w/w acquire context 165 + * @ctx: the acquire context to free 166 + * 167 + * Releases a w/w acquire context. This must be called _after_ all acquired w/w 168 + * mutexes have been released with ww_mutex_unlock. 
169 + */ 170 + static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) 171 + { 172 + #ifdef CONFIG_DEBUG_MUTEXES 173 + mutex_release(&ctx->dep_map, 0, _THIS_IP_); 174 + 175 + DEBUG_LOCKS_WARN_ON(ctx->acquired); 176 + if (!config_enabled(CONFIG_PROVE_LOCKING)) 177 + /* 178 + * lockdep will normally handle this, 179 + * but fail without anyway 180 + */ 181 + ctx->done_acquire = 1; 182 + 183 + if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC)) 184 + /* ensure ww_acquire_fini will still fail if called twice */ 185 + ctx->acquired = ~0U; 186 + #endif 187 + } 188 + 189 + extern int __must_check __ww_mutex_lock(struct ww_mutex *lock, 190 + struct ww_acquire_ctx *ctx); 191 + extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock, 192 + struct ww_acquire_ctx *ctx); 193 + 194 + /** 195 + * ww_mutex_lock - acquire the w/w mutex 196 + * @lock: the mutex to be acquired 197 + * @ctx: w/w acquire context, or NULL to acquire only a single lock. 198 + * 199 + * Lock the w/w mutex exclusively for this task. 200 + * 201 + * Deadlocks within a given w/w class of locks are detected and handled with the 202 + * wait/wound algorithm. If the lock isn't immediately avaiable this function 203 + * will either sleep until it is (wait case). Or it selects the current context 204 + * for backing off by returning -EDEADLK (wound case). Trying to acquire the 205 + * same lock with the same context twice is also detected and signalled by 206 + * returning -EALREADY. Returns 0 if the mutex was successfully acquired. 207 + * 208 + * In the wound case the caller must release all currently held w/w mutexes for 209 + * the given context and then wait for this contending lock to be available by 210 + * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this 211 + * lock and proceed with trying to acquire further w/w mutexes (e.g. when 212 + * scanning through lru lists trying to free resources). 
213 + * 214 + * The mutex must later on be released by the same task that 215 + * acquired it. The task may not exit without first unlocking the mutex. Also, 216 + * kernel memory where the mutex resides must not be freed with the mutex still 217 + * locked. The mutex must first be initialized (or statically defined) before it 218 + * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be 219 + * of the same w/w lock class as was used to initialize the acquire context. 220 + * 221 + * A mutex acquired with this function must be released with ww_mutex_unlock. 222 + */ 223 + static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 224 + { 225 + if (ctx) 226 + return __ww_mutex_lock(lock, ctx); 227 + 228 + mutex_lock(&lock->base); 229 + return 0; 230 + } 231 + 232 + /** 233 + * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible 234 + * @lock: the mutex to be acquired 235 + * @ctx: w/w acquire context 236 + * 237 + * Lock the w/w mutex exclusively for this task. 238 + * 239 + * Deadlocks within a given w/w class of locks are detected and handled with the 240 + * wait/wound algorithm. If the lock isn't immediately avaiable this function 241 + * will either sleep until it is (wait case). Or it selects the current context 242 + * for backing off by returning -EDEADLK (wound case). Trying to acquire the 243 + * same lock with the same context twice is also detected and signalled by 244 + * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a 245 + * signal arrives while waiting for the lock then this function returns -EINTR. 246 + * 247 + * In the wound case the caller must release all currently held w/w mutexes for 248 + * the given context and then wait for this contending lock to be available by 249 + * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to 250 + * not acquire this lock and proceed with trying to acquire further w/w mutexes 251 + * (e.g. 
when scanning through lru lists trying to free resources). 252 + * 253 + * The mutex must later on be released by the same task that 254 + * acquired it. The task may not exit without first unlocking the mutex. Also, 255 + * kernel memory where the mutex resides must not be freed with the mutex still 256 + * locked. The mutex must first be initialized (or statically defined) before it 257 + * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be 258 + * of the same w/w lock class as was used to initialize the acquire context. 259 + * 260 + * A mutex acquired with this function must be released with ww_mutex_unlock. 261 + */ 262 + static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, 263 + struct ww_acquire_ctx *ctx) 264 + { 265 + if (ctx) 266 + return __ww_mutex_lock_interruptible(lock, ctx); 267 + else 268 + return mutex_lock_interruptible(&lock->base); 269 + } 270 + 271 + /** 272 + * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex 273 + * @lock: the mutex to be acquired 274 + * @ctx: w/w acquire context 275 + * 276 + * Acquires a w/w mutex with the given context after a wound case. This function 277 + * will sleep until the lock becomes available. 278 + * 279 + * The caller must have released all w/w mutexes already acquired with the 280 + * context and then call this function on the contended lock. 281 + * 282 + * Afterwards the caller may continue to (re)acquire the other w/w mutexes it 283 + * needs with ww_mutex_lock. Note that the -EALREADY return code from 284 + * ww_mutex_lock can be used to avoid locking this contended mutex twice. 285 + * 286 + * It is forbidden to call this function with any other w/w mutexes associated 287 + * with the context held. It is forbidden to call this on anything else than the 288 + * contending mutex. 289 + * 290 + * Note that the slowpath lock acquiring can also be done by calling 291 + * ww_mutex_lock directly. 
This function here is simply to help w/w mutex 292 + * locking code readability by clearly denoting the slowpath. 293 + */ 294 + static inline void 295 + ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 296 + { 297 + int ret; 298 + #ifdef CONFIG_DEBUG_MUTEXES 299 + DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); 300 + #endif 301 + ret = ww_mutex_lock(lock, ctx); 302 + (void)ret; 303 + } 304 + 305 + /** 306 + * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible 307 + * @lock: the mutex to be acquired 308 + * @ctx: w/w acquire context 309 + * 310 + * Acquires a w/w mutex with the given context after a wound case. This function 311 + * will sleep until the lock becomes available and returns 0 when the lock has 312 + * been acquired. If a signal arrives while waiting for the lock then this 313 + * function returns -EINTR. 314 + * 315 + * The caller must have released all w/w mutexes already acquired with the 316 + * context and then call this function on the contended lock. 317 + * 318 + * Afterwards the caller may continue to (re)acquire the other w/w mutexes it 319 + * needs with ww_mutex_lock. Note that the -EALREADY return code from 320 + * ww_mutex_lock can be used to avoid locking this contended mutex twice. 321 + * 322 + * It is forbidden to call this function with any other w/w mutexes associated 323 + * with the given context held. It is forbidden to call this on anything else 324 + * than the contending mutex. 325 + * 326 + * Note that the slowpath lock acquiring can also be done by calling 327 + * ww_mutex_lock_interruptible directly. This function here is simply to help 328 + * w/w mutex locking code readability by clearly denoting the slowpath. 
329 + */ 330 + static inline int __must_check 331 + ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, 332 + struct ww_acquire_ctx *ctx) 333 + { 334 + #ifdef CONFIG_DEBUG_MUTEXES 335 + DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); 336 + #endif 337 + return ww_mutex_lock_interruptible(lock, ctx); 338 + } 339 + 340 + extern void ww_mutex_unlock(struct ww_mutex *lock); 341 + 342 + /** 343 + * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context 344 + * @lock: mutex to lock 345 + * 346 + * Trylocks a mutex without acquire context, so no deadlock detection is 347 + * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. 348 + */ 349 + static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) 350 + { 351 + return mutex_trylock(&lock->base); 352 + } 353 + 354 + /*** 355 + * ww_mutex_destroy - mark a w/w mutex unusable 356 + * @lock: the mutex to be destroyed 357 + * 358 + * This function marks the mutex uninitialized, and any subsequent 359 + * use of the mutex is forbidden. The mutex must not be locked when 360 + * this function is called. 361 + */ 362 + static inline void ww_mutex_destroy(struct ww_mutex *lock) 363 + { 364 + mutex_destroy(&lock->base); 365 + } 366 + 367 + /** 368 + * ww_mutex_is_locked - is the w/w mutex locked 369 + * @lock: the mutex to be queried 370 + * 371 + * Returns 1 if the mutex is locked, 0 if unlocked. 372 + */ 373 + static inline bool ww_mutex_is_locked(struct ww_mutex *lock) 374 + { 375 + return mutex_is_locked(&lock->base); 376 + } 377 + 378 + #endif
+1
kernel/mutex.c
··· 18 18 * Also see Documentation/mutex-design.txt. 19 19 */ 20 20 #include <linux/mutex.h> 21 + #include <linux/ww_mutex.h> 21 22 #include <linux/sched.h> 22 23 #include <linux/sched/rt.h> 23 24 #include <linux/export.h>
+1
lib/locking-selftest.c
··· 12 12 */ 13 13 #include <linux/rwsem.h> 14 14 #include <linux/mutex.h> 15 + #include <linux/ww_mutex.h> 15 16 #include <linux/sched.h> 16 17 #include <linux/delay.h> 17 18 #include <linux/lockdep.h>