/* include/linux/mmap_lock.h — Linux v6.15 (225 lines in the original file) */
#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>

/* Static initializer for the mmap_lock rwsem embedded in an mm_struct. */
#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),

/*
 * Three tracepoints cover the lifecycle of an mmap_lock operation:
 * the attempt to take the lock, the (possibly failed) return from the
 * acquire, and the release.
 */
DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);

#ifdef CONFIG_TRACING

/* Out-of-line trace emitters; called only when the tracepoint is enabled. */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

/*
 * Inline wrappers that gate each trace call on tracepoint_enabled(), so
 * the lock fast paths pay only a cheap enabled-check when tracing is off.
 */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
	if (tracepoint_enabled(mmap_lock_start_locking))
		__mmap_lock_do_trace_start_locking(mm, write);
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
	if (tracepoint_enabled(mmap_lock_acquire_returned))
		__mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
	if (tracepoint_enabled(mmap_lock_released))
		__mmap_lock_do_trace_released(mm, write);
}

#else /* !CONFIG_TRACING */

/* No-op stubs so callers need no #ifdefs around the trace hooks. */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}

#endif /* CONFIG_TRACING */

/* Assert that mmap_lock is held by someone (read or write mode). */
static inline void mmap_assert_locked(const struct mm_struct *mm)
{
	rwsem_assert_held(&mm->mmap_lock);
}

/* Assert that mmap_lock is held in write mode. */
static inline void mmap_assert_write_locked(const struct mm_struct *mm)
{
	rwsem_assert_held_write(&mm->mmap_lock);
}

#ifdef CONFIG_PER_VMA_LOCK

/* Initialize the seqcount used by the speculative (lockless) read path. */
static inline void mm_lock_seqcount_init(struct mm_struct *mm)
{
	seqcount_init(&mm->mm_lock_seq);
}

/*
 * Begin a write section on mm_lock_seq. Called with mmap_lock write-held
 * (see mmap_write_lock() and friends below), which is what makes the raw,
 * non-locking seqcount variant safe to use here.
 */
static inline void mm_lock_seqcount_begin(struct mm_struct *mm)
{
	do_raw_write_seqcount_begin(&mm->mm_lock_seq);
}

/* End the write section opened by mm_lock_seqcount_begin(). */
static inline void mm_lock_seqcount_end(struct mm_struct *mm)
{
	/* KCSAN check: only one writer may be inside the section. */
	ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq);
	do_raw_write_seqcount_end(&mm->mm_lock_seq);
}

/*
 * Try to start a speculative (lockless) read section against mmap_lock,
 * storing the observed sequence in *seq. Returns false if a writer is
 * active, in which case the caller should fall back to taking the lock.
 * Pair with mmap_lock_speculate_retry() to validate the section.
 */
static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
{
	/*
	 * Since mmap_lock is a sleeping lock, and waiting for it to become
	 * unlocked is more or less equivalent with taking it ourselves, don't
	 * bother with the speculative path if mmap_lock is already write-locked
	 * and take the slow path, which takes the lock.
	 */
	return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
}

/*
 * Validate a speculative read section started with
 * mmap_lock_speculate_try_begin(). Returns true if a writer intervened
 * and the speculation must be retried (or the lock taken).
 */
static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
{
	return read_seqcount_retry(&mm->mm_lock_seq, seq);
}

#else /* CONFIG_PER_VMA_LOCK */

/* Without per-VMA locks the seqcount does not exist; these are no-ops. */
static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}

/* Speculation is never possible: refuse to begin ... */
static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
{
	return false;
}

/* ... and always demand a retry if somehow asked. */
static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
{
	return true;
}

#endif /* CONFIG_PER_VMA_LOCK */

/*
 * Take mmap_lock for writing (may sleep). The seqcount write section is
 * opened only after the lock is held, so lockless readers see a
 * consistent "writer active" indication.
 */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	mm_lock_seqcount_begin(mm);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}

/* As mmap_write_lock(), with an explicit lockdep subclass for nesting. */
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	mm_lock_seqcount_begin(mm);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}

/*
 * Take mmap_lock for writing, but allow a fatal signal to interrupt the
 * wait. Returns 0 on success or a negative error; the seqcount section
 * is only opened when the lock was actually acquired.
 */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, true);
	ret = down_write_killable(&mm->mmap_lock);
	if (!ret)
		mm_lock_seqcount_begin(mm);
	__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
	return ret;
}

/*
 * Drop all currently-held per-VMA locks.
 * This is called from the mmap_lock implementation directly before releasing
 * a write-locked mmap_lock (or downgrading it to read-locked).
 * This should normally NOT be called manually from other places.
 * If you want to call this manually anyway, keep in mind that this will release
 * *all* VMA write locks, including ones from further up the stack.
 */
static inline void vma_end_write_all(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	mm_lock_seqcount_end(mm);
}

/*
 * Release a write-held mmap_lock. VMA write locks are dropped (and the
 * seqcount section closed) before the rwsem itself is released.
 */
static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	vma_end_write_all(mm);
	up_write(&mm->mmap_lock);
}

/*
 * Downgrade a write-held mmap_lock to read-held without releasing it.
 * Traced as a successful read acquisition since that is the state the
 * caller ends up holding.
 */
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	__mmap_lock_trace_acquire_returned(mm, false, true);
	vma_end_write_all(mm);
	downgrade_write(&mm->mmap_lock);
}

/* Take mmap_lock for reading (may sleep). */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}

/*
 * Take mmap_lock for reading, interruptible by a fatal signal.
 * Returns 0 on success or a negative error.
 */
static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, ret == 0);
	return ret;
}

/*
 * Try to take mmap_lock for reading without sleeping.
 * Returns true if the lock was acquired.
 */
static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	bool ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_trylock(&mm->mmap_lock) != 0;
	__mmap_lock_trace_acquire_returned(mm, false, ret);
	return ret;
}

/* Release a read-held mmap_lock. */
static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}

/*
 * Release a read-held mmap_lock on behalf of a task that is not the
 * lockdep-recorded owner (the non_owner rwsem variant).
 */
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}

/* Nonzero if other tasks are currently waiting on mmap_lock. */
static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock);
}

#endif /* _LINUX_MMAP_LOCK_H */