Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1#ifndef _LINUX_MMAP_LOCK_H
2#define _LINUX_MMAP_LOCK_H
3
4#include <linux/lockdep.h>
5#include <linux/mm_types.h>
6#include <linux/mmdebug.h>
7#include <linux/rwsem.h>
8#include <linux/tracepoint-defs.h>
9#include <linux/types.h>
10
/* Static initializer for the mmap_lock field of an mm_struct (e.g. init_mm). */
#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
13
/*
 * Tracepoints for mmap_lock acquisition/release; DECLARE_TRACEPOINT() only
 * declares the static keys so tracepoint_enabled() can be tested cheaply below.
 */
DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);
17
18#ifdef CONFIG_TRACING
19
/* Out-of-line slow paths, called only when the tracepoint is enabled. */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
24
/* Emit the "about to take mmap_lock" tracepoint if it is enabled. */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
	/* tracepoint_enabled() is a static-key test; near-free when off. */
	if (tracepoint_enabled(mmap_lock_start_locking))
		__mmap_lock_do_trace_start_locking(mm, write);
}
31
/*
 * Emit the "lock attempt returned" tracepoint if enabled; @success is false
 * for killable/trylock attempts that did not get the lock.
 */
static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
	if (tracepoint_enabled(mmap_lock_acquire_returned))
		__mmap_lock_do_trace_acquire_returned(mm, write, success);
}
38
/* Emit the "mmap_lock released" tracepoint if it is enabled. */
static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
	if (tracepoint_enabled(mmap_lock_released))
		__mmap_lock_do_trace_released(mm, write);
}
44
45#else /* !CONFIG_TRACING */
46
/* No-op stub: tracing disabled (!CONFIG_TRACING). */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}
51
/* No-op stub: tracing disabled (!CONFIG_TRACING). */
static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
}
56
/* No-op stub: tracing disabled (!CONFIG_TRACING). */
static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}
60
61#endif /* CONFIG_TRACING */
62
/* Assert that mmap_lock is held (read or write) by the current context. */
static inline void mmap_assert_locked(const struct mm_struct *mm)
{
	rwsem_assert_held(&mm->mmap_lock);
}
67
/* Assert that mmap_lock is held for write by the current context. */
static inline void mmap_assert_write_locked(const struct mm_struct *mm)
{
	rwsem_assert_held_write(&mm->mmap_lock);
}
72
73#ifdef CONFIG_PER_VMA_LOCK
74/*
75 * Drop all currently-held per-VMA locks.
76 * This is called from the mmap_lock implementation directly before releasing
77 * a write-locked mmap_lock (or downgrading it to read-locked).
78 * This should normally NOT be called manually from other places.
79 * If you want to call this manually anyway, keep in mind that this will release
80 * *all* VMA write locks, including ones from further up the stack.
81 */
static inline void vma_end_write_all(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	/*
	 * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
	 * mmap_lock being held.
	 * We need RELEASE semantics here to ensure that preceding stores into
	 * the VMA take effect before we unlock it with this store.
	 * Pairs with ACQUIRE semantics in vma_start_read().
	 */
	/* Bumping the sequence invalidates every vm_lock_seq match at once. */
	smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
94#else
/* No-op stub: per-VMA locking disabled (!CONFIG_PER_VMA_LOCK). */
static inline void vma_end_write_all(struct mm_struct *mm) {}
96#endif
97
/* Initialize a freshly allocated mm's mmap_lock (called from mm setup). */
static inline void mmap_init_lock(struct mm_struct *mm)
{
	init_rwsem(&mm->mmap_lock);
}
102
/* Take mmap_lock for write (uninterruptible), with tracing around the wait. */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}
109
/*
 * Take mmap_lock for write with a lockdep subclass, for the rare callers
 * that legitimately nest two mms' locks (e.g. fork-time copy).
 */
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}
116
117static inline int mmap_write_lock_killable(struct mm_struct *mm)
118{
119 int ret;
120
121 __mmap_lock_trace_start_locking(mm, true);
122 ret = down_write_killable(&mm->mmap_lock);
123 __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
124 return ret;
125}
126
/* Release a write-held mmap_lock. */
static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	/* Must drop all per-VMA write locks BEFORE the rwsem is released. */
	vma_end_write_all(mm);
	up_write(&mm->mmap_lock);
}
133
/* Atomically convert a write-held mmap_lock into a read hold. */
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	/* Traced as a (successful) read acquisition. */
	__mmap_lock_trace_acquire_returned(mm, false, true);
	/* Per-VMA write locks must end before write exclusivity is given up. */
	vma_end_write_all(mm);
	downgrade_write(&mm->mmap_lock);
}
140
/* Take mmap_lock for read (uninterruptible), with tracing around the wait. */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}
147
148static inline int mmap_read_lock_killable(struct mm_struct *mm)
149{
150 int ret;
151
152 __mmap_lock_trace_start_locking(mm, false);
153 ret = down_read_killable(&mm->mmap_lock);
154 __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
155 return ret;
156}
157
158static inline bool mmap_read_trylock(struct mm_struct *mm)
159{
160 bool ret;
161
162 __mmap_lock_trace_start_locking(mm, false);
163 ret = down_read_trylock(&mm->mmap_lock) != 0;
164 __mmap_lock_trace_acquire_returned(mm, false, ret);
165 return ret;
166}
167
/* Release a read-held mmap_lock. */
static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}
173
/*
 * Release a read-held mmap_lock from a task other than the one that took it
 * (skips lockdep ownership checks); used when the unlock is deferred to
 * another context.
 */
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}
179
/*
 * Return non-zero if other tasks are currently waiting on mmap_lock;
 * a hint for lock holders that should drop/reacquire to reduce latency.
 */
static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock);
}
184
185#endif /* _LINUX_MMAP_LOCK_H */