/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/ioasid.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on; mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The full memory barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
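
/*
 * Illustrative pairing (a sketch, not taken from an in-tree caller): a
 * subsystem that must keep the mm_struct itself alive across a long
 * wait, without pinning the address space, could do:
 *
 *	mmgrab(mm);		// take an mm_count reference
 *	...			// mm may outlive its owning task here
 *	mmdrop(mm);		// may free mm if this was the last reference
 */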

#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput() but performs the slow path from async context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif
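
/*
 * Common liveness pattern (sketch): take a temporary address-space
 * reference only if the mm still has users, and drop it with mmput():
 *
 *	if (mmget_not_zero(mm)) {
 *		...		// address space guaranteed to stay alive
 *		mmput(mm);
 *	}
 */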

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
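
/*
 * Sketch of accessing another task's mm (hypothetical caller; a NULL
 * return means the task has no mm or it is already going away):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...		// operate on the address space
 *		mmput(mm);
 *	}
 */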

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can produce a false negative. But we do not care: if
	 * init or another oom-unkillable task does this it should blame
	 * itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !__GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
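
/*
 * Illustrative effect (a sketch; the caller is hypothetical): inside a
 * scoped-NOIO section, a GFP_KERNEL request is effectively demoted to
 * GFP_NOIO:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	gfp_t eff = current_gfp_context(GFP_KERNEL);
 *	...			// eff lacks __GFP_IO and __GFP_FS
 *	memalloc_noio_restore(noio_flags);
 */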

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/*
 * Any memory-allocation retry loop should use memalloc_retry_wait(),
 * and pass the flags for the most constrained allocation attempt that
 * might have failed. This provides useful documentation of where loops
 * are, and a central place to fine-tune the waiting as the MM
 * implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/*
	 * We use io_schedule_timeout() because waiting for memory
	 * typically includes waiting for dirty pages to be written
	 * out, which requires IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/*
		 * Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait.
		 */
		io_schedule_timeout(HZ/50);
}
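
/*
 * Sketch of the intended retry shape (hypothetical caller; bail-out
 * conditions and error handling elided):
 *
 *	struct page *page;
 *
 *	while (!(page = alloc_page(GFP_NOFS)))
 *		memalloc_retry_wait(GFP_NOFS);
 */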

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. Compiles to nothing
 * without CONFIG_LOCKDEP. Includes a conditional might_sleep() if
 * @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
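
/*
 * Sketch of an annotated slow path (struct ctx and get_buf() are
 * hypothetical): the common case allocates nothing, but callers must
 * still be prepared for the function to sleep:
 *
 *	void *get_buf(struct ctx *c, gfp_t gfp)
 *	{
 *		might_alloc(gfp);
 *		if (c->cached_buf)
 *			return c->cached_buf;
 *		return kmalloc(c->buf_size, gfp);
 *	}
 */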

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that the given flags are the return value of the
 * pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
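
/*
 * Typical scoped-NOIO usage (sketch):
 *
 *	unsigned int noio_flags;
 *
 *	noio_flags = memalloc_noio_save();
 *	...			// allocations here implicitly lack __GFP_IO
 *	memalloc_noio_restore(noio_flags);
 */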

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with the
 * flags returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that the given flags are the return value of the
 * pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
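
/*
 * The save/restore pairs nest; the returned flags are what make an
 * inner scope transparent to the outer one (sketch):
 *
 *	unsigned int outer = memalloc_nofs_save();
 *	{
 *		unsigned int inner = memalloc_nofs_save();
 *		...
 *		memalloc_nofs_restore(inner);	// still NOFS here
 *	}
 *	memalloc_nofs_restore(outer);		// NOFS scope really ends
 */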

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

static inline unsigned int memalloc_pin_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_PIN;

	current->flags |= PF_MEMALLOC_PIN;
	return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}
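
/*
 * Sketch (semantics assumed from how current_gfp_context() consumes the
 * flags above, not asserted by this header): PF_MEMALLOC keeps a
 * reclaim-critical section from recursing into reclaim, and
 * PF_MEMALLOC_PIN keeps allocations out of movable placement while
 * pages may be pinned long-term. Both pair like the NOIO/NOFS scopes:
 *
 *	unsigned int flags = memalloc_pin_save();
 *	...			// allocations lack __GFP_MOVABLE
 *	memalloc_pin_restore(flags);
 */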

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope.
 * All the __GFP_ACCOUNT allocations until the end of the scope will be
 * charged to the given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
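
/*
 * Remote charging sketch (hypothetical caller), following the NOTE above:
 *
 *	struct mem_cgroup *old;
 *
 *	old = set_active_memcg(memcg);
 *	...			// __GFP_ACCOUNT allocations charged to memcg
 *	set_active_memcg(old);	// restore the previous charging scope
 */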

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
	MEMBARRIER_FLAG_RSEQ = (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#ifdef CONFIG_IOMMU_SVA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	mm->pasid = INVALID_IOASID;
}

/* Associate a PASID with an mm_struct: */
static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid)
{
	mm->pasid = pasid;
}

static inline void mm_pasid_drop(struct mm_struct *mm)
{
	if (pasid_valid(mm->pasid)) {
		ioasid_free(mm->pasid);
		mm->pasid = INVALID_IOASID;
	}
}
#else
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) {}
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif
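
/*
 * PASID lifecycle sketch (illustrative ordering, assuming the usual
 * create/bind/teardown flow rather than any specific in-tree driver):
 *
 *	mm_pasid_init(mm);		// at mm creation: no PASID yet
 *	mm_pasid_set(mm, pasid);	// when SVA binds the mm to a device
 *	mm_pasid_drop(mm);		// at mm teardown: frees a valid PASID
 */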

#endif /* _LINUX_SCHED_MM_H */