/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
#include <linux/cleanup.h>
#include <linux/mutex_types.h>

struct device;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
		, .dep_map = {					\
			.name = #lockname,			\
			.wait_type_inner = LD_WAIT_SLEEP,	\
		}
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

#ifdef CONFIG_DEBUG_MUTEXES

# define __DEBUG_MUTEX_INITIALIZER(lockname)			\
	, .magic = &lockname

extern void mutex_destroy(struct mutex *lock);

#else

# define __DEBUG_MUTEX_INITIALIZER(lockname)

static inline void mutex_destroy(struct mutex *lock) {}

#endif

/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
#define mutex_init(mutex)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__mutex_init((mutex), #mutex, &__key);			\
} while (0)

/**
 * mutex_init_with_key - initialize a mutex with a given lockdep key
 * @mutex: the mutex to be initialized
 * @key: the lockdep key to be associated with the mutex
 *
 * Initialize the mutex to the unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
#define mutex_init_with_key(mutex, key) __mutex_init((mutex), #mutex, (key))

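/*
 * Illustrative usage sketch (the structure and function names below are
 * hypothetical, not part of this header): run-time initialization of a
 * mutex embedded in a private structure.
 *
 *	struct my_dev {
 *		struct mutex	io_lock;
 *		int		state;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *d)
 *	{
 *		mutex_init(&d->io_lock);
 *	}
 *
 * mutex_init() leaves the mutex unlocked and, under lockdep, gives each
 * initialization site its own class via the static __key declared above.
 */
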
#ifndef CONFIG_PREEMPT_RT
#define __MUTEX_INITIALIZER(lockname)					\
		{ .owner = ATOMIC_LONG_INIT(0)				\
		, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list)	\
		__DEBUG_MUTEX_INITIALIZER(lockname)			\
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void mutex_init_lockdep(struct mutex *lock, const char *name, struct lock_class_key *key);

static inline void __mutex_init(struct mutex *lock, const char *name,
				struct lock_class_key *key)
{
	mutex_init_lockdep(lock, name, key);
}
#else
extern void mutex_init_generic(struct mutex *lock);

static inline void __mutex_init(struct mutex *lock, const char *name,
				struct lock_class_key *key)
{
	mutex_init_generic(lock);
}
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
extern bool mutex_is_locked(struct mutex *lock);

#else /* !CONFIG_PREEMPT_RT */
/*
 * Preempt-RT variant based on rtmutexes.
 */

#define __MUTEX_INITIALIZER(mutexname)					\
{									\
	.rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex)	\
	__DEP_MAP_MUTEX_INITIALIZER(mutexname)				\
}

#define DEFINE_MUTEX(mutexname)						\
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

#define mutex_is_locked(l)	rt_mutex_base_is_locked(&(l)->rtmutex)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_rt_init_lockdep(struct mutex *mutex, const char *name,
				  struct lock_class_key *key);

static inline void __mutex_init(struct mutex *lock, const char *name,
				struct lock_class_key *key)
{
	mutex_rt_init_lockdep(lock, name, key);
}

#else
extern void mutex_rt_init_generic(struct mutex *mutex);

static inline void __mutex_init(struct mutex *lock, const char *name,
				struct lock_class_key *key)
{
	mutex_rt_init_generic(lock);
}
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* CONFIG_PREEMPT_RT */

#ifdef CONFIG_DEBUG_MUTEXES

int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock);

#else

static inline int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock)
{
	/*
	 * When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop, so
	 * there is really no need to register it in the devm subsystem.
	 */
	return 0;
}

#endif

#define __mutex_init_ret(mutex)				\
({							\
	typeof(mutex) mutex_ = (mutex);			\
							\
	mutex_init(mutex_);				\
	mutex_;						\
})

#define devm_mutex_init(dev, mutex)	\
	__devm_mutex_init(dev, __mutex_init_ret(mutex))

/*
 * See kernel/locking/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/locking/mutex-design.rst.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
					unsigned int subclass);
extern int __must_check _mutex_lock_killable(struct mutex *lock,
					unsigned int subclass, struct lockdep_map *nest_lock);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)

#define mutex_lock_nest_lock(lock, nest_lock)				\
do {									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
} while (0)

#define mutex_lock_killable_nest_lock(lock, nest_lock)			\
(									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),	\
	_mutex_lock_killable(lock, 0, &(nest_lock)->dep_map)		\
)

#define mutex_lock_killable_nested(lock, subclass) \
	_mutex_lock_killable(lock, subclass, NULL)

#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);
extern void mutex_lock_io(struct mutex *lock);

# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_killable_nest_lock(lock, nest_lock) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif

/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 *       not the down_trylock() convention!
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);

#define mutex_trylock_nest_lock(lock, nest_lock)			\
(									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map),	\
	_mutex_trylock_nest_lock(lock, &(nest_lock)->dep_map)		\
)

#define mutex_trylock(lock)	_mutex_trylock_nest_lock(lock, NULL)
#else
extern int mutex_trylock(struct mutex *lock);
#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
#endif

extern void mutex_unlock(struct mutex *lock);

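/*
 * Illustrative usage sketch (the structure and function names are
 * hypothetical, not part of this header): the plain lock/unlock pattern
 * and the interruptible variant, whose return value must be checked.
 *
 *	static int my_dev_update(struct my_dev *d, int v)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(&d->io_lock);
 *		if (ret)
 *			return ret;
 *		d->state = v;
 *		mutex_unlock(&d->io_lock);
 *		return 0;
 *	}
 *
 * mutex_lock() sleeps unconditionally until the lock is acquired;
 * mutex_lock_interruptible() additionally backs out with an error if a
 * signal arrives while sleeping. mutex_trylock() follows the
 * spin_trylock() convention noted above: 1 on success, 0 on contention.
 */
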
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);

DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)

extern unsigned long mutex_get_owner(struct mutex *lock);

#endif /* __LINUX_MUTEX_H */
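
/*
 * Illustrative sketch of the scope-based guards defined above (see
 * <linux/cleanup.h>); the structure and function names are hypothetical.
 *
 *	static int my_dev_get_state(struct my_dev *d)
 *	{
 *		guard(mutex)(&d->io_lock);
 *		return d->state;
 *	}
 *
 * The mutex is dropped automatically when the guard variable goes out of
 * scope, so every return path is covered without an explicit
 * mutex_unlock() call.
 */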