#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>

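/*
 * Static initializer for mm->mmap_lock, intended for statically allocated
 * mm_structs (e.g. init_mm). Expands to a designated initializer for the
 * embedded rw_semaphore.
 */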
#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),

DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);

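/*
 * When CONFIG_TRACING is enabled, each lock operation is bracketed by
 * tracepoints. tracepoint_enabled() is a static-key test, so the
 * out-of-line __mmap_lock_do_trace_*() helpers are only called while the
 * corresponding tracepoint is actually active; otherwise these wrappers
 * compile down to a patched-out branch.
 */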
#ifdef CONFIG_TRACING

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
	if (tracepoint_enabled(mmap_lock_start_locking))
		__mmap_lock_do_trace_start_locking(mm, write);
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
	if (tracepoint_enabled(mmap_lock_acquire_returned))
		__mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
	if (tracepoint_enabled(mmap_lock_released))
		__mmap_lock_do_trace_released(mm, write);
}

#else /* !CONFIG_TRACING */

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}

#endif /* CONFIG_TRACING */

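/*
 * Assert that the mmap_lock is held. The lockdep assertion is compiled out
 * when lockdep is disabled, and the VM_BUG_ON_MM() check is compiled out
 * unless CONFIG_DEBUG_VM is set.
 */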
static inline void mmap_assert_locked(struct mm_struct *mm)
{
	lockdep_assert_held(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

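/* As above, but require that the lock is held for writing. */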
static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

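/*
 * With CONFIG_PER_VMA_LOCK, a VMA is considered write-locked while its
 * vm_lock_seq matches mm->mm_lock_seq. Bumping mm_lock_seq here therefore
 * releases the write lock on every VMA locked under this mmap_lock hold.
 */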
#ifdef CONFIG_PER_VMA_LOCK
static inline void vma_end_write_all(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	/*
	 * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
	 * mmap_lock being held.
	 * We need RELEASE semantics here to ensure that preceding stores into
	 * the VMA take effect before we unlock it with this store.
	 * Pairs with ACQUIRE semantics in vma_start_read().
	 */
	smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
#else
static inline void vma_end_write_all(struct mm_struct *mm) {}
#endif

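/* Initialize the mmap_lock of a freshly allocated mm_struct. */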
static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_lock);
}

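/*
 * Acquire the mmap_lock for writing, e.g. when modifying the VMA tree.
 * A typical caller (illustrative sketch only) looks like:
 *
 *	mmap_write_lock(mm);
 *	... modify mm's VMAs ...
 *	mmap_write_unlock(mm);
 */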
static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}

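/*
 * Lockdep-annotated variant for the rare cases where two mmap_locks are
 * held at once (e.g. locking both the parent's and the child's mm).
 */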
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}

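/*
 * Like mmap_write_lock(), but the sleep can be interrupted by a fatal
 * signal. Returns 0 on success and -EINTR if the task was killed while
 * waiting.
 */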
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, true);
	ret = down_write_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
	return ret;
}

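/* Try to take the lock for writing without blocking; true on success. */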
static inline bool mmap_write_trylock(struct mm_struct *mm)
{
	bool ret;

	__mmap_lock_trace_start_locking(mm, true);
	ret = down_write_trylock(&mm->mmap_lock) != 0;
	__mmap_lock_trace_acquire_returned(mm, true, ret);
	return ret;
}

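/*
 * Release the write lock. vma_end_write_all() must run before up_write():
 * it asserts that the mmap_lock is still write-locked, and the mm_lock_seq
 * bump it performs relies on that exclusion.
 */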
static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	vma_end_write_all(mm);
	up_write(&mm->mmap_lock);
}

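/*
 * Atomically convert a held write lock into a read lock, without letting
 * another writer in between. Traced as a read acquisition that always
 * succeeds.
 */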
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	__mmap_lock_trace_acquire_returned(mm, false, true);
	vma_end_write_all(mm);
	downgrade_write(&mm->mmap_lock);
}

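/*
 * Read-side acquisition, for walking VMAs without modifying them. The
 * killable and trylock variants mirror their write-side counterparts.
 */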
static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	int ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_killable(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, ret == 0);
	return ret;
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	bool ret;

	__mmap_lock_trace_start_locking(mm, false);
	ret = down_read_trylock(&mm->mmap_lock) != 0;
	__mmap_lock_trace_acquire_returned(mm, false, ret);
	return ret;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}

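/*
 * Read-unlock on behalf of a task that is not the lock owner, skipping
 * lockdep's owner tracking. Needed when the unlock happens in a different
 * context than the lock (e.g. deferred to a worker).
 */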
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}

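/*
 * Returns non-zero when other tasks are blocked waiting on the lock,
 * which lets long-running lock holders decide to drop and retake it.
 */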
static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock);
}

#endif /* _LINUX_MMAP_LOCK_H */