/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/*
 * This has to be called after a get_task_mm()/mmget_not_zero()
 * followed by taking the mmap_lock for writing before modifying the
 * vmas or anything the coredump pretends not to change from under it.
 *
 * It also has to be called when mmgrab() is used in the context of
 * the process, but then the mm_count refcount is transferred outside
 * the context of the process to run down_write() on that pinned mm.
 *
 * NOTE: find_extend_vma() called from GUP context is the only place
 * that can modify the "mm" (notably the vm_start/end) under mmap_lock
 * for reading and outside the context of the process, so it is also
 * the only case that holds the mmap_lock for reading that must call
 * this function. Generally if the mmap_lock is held for reading
 * there's no need for this check after get_task_mm()/mmget_not_zero().
 *
 * This function can be obsoleted and the check removed once the
 * coredump code holds the mmap_lock for writing before invoking the
 * ->core_dump methods.
 */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}
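
/*
 * Example (illustrative sketch added by the editor, not part of the
 * original header): a hypothetical context object that holds a
 * long-lived pin on an mm_struct. mmgrab() only keeps the mm_struct
 * itself alive; mmget_not_zero()/mmput() would still be needed before
 * touching the address space.
 */
struct example_mm_pin {
	struct mm_struct *mm;
};

static inline void example_mm_pin_init(struct example_mm_pin *pin,
				       struct mm_struct *mm)
{
	mmgrab(mm);		/* mm_struct cannot be freed until mmdrop() */
	pin->mm = mm;
}

static inline void example_mm_pin_release(struct example_mm_pin *pin)
{
	mmdrop(pin->mm);	/* drop the mm_count reference taken above */
	pin->mm = NULL;
}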

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* Same as above, but performs the slow path from async context. Can
 * be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check; it can be a false negative. But we do not care: if init or
	 * another oom-unkillable task does this, it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
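
/*
 * Example (illustrative sketch added by the editor, not part of the
 * original header): a hypothetical helper that peeks at another task's
 * address space size. get_task_mm() takes an mm_users reference, so
 * the address space cannot be torn down until the paired mmput();
 * such a reference must not be held for an unbounded amount of time.
 */
static inline unsigned long example_task_total_vm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);
	unsigned long total_vm;

	if (!mm)
		return 0;		/* kernel thread or task already exiting */
	total_vm = mm->total_vm;	/* safe: mm_users is held */
	mmput(mm);			/* release the mm_users reference */
	return total_vm;
}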

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
	}
	return flags;
}

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by the memalloc_noio_save function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by the memalloc_nofs_save function.
 * Always make sure that the given flags value is the return value from the
 * pairing memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
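
/*
 * Example (illustrative sketch added by the editor, not part of the
 * original header): a hypothetical filesystem path brackets its
 * allocations with memalloc_nofs_save()/memalloc_nofs_restore() so
 * that reclaim triggered by those allocations cannot recurse back
 * into the filesystem. current_gfp_context() shows the effective
 * flags inside the scope.
 */
static inline gfp_t example_gfp_inside_nofs_scope(void)
{
	unsigned int nofs_flags;
	gfp_t effective;

	nofs_flags = memalloc_nofs_save();		/* enter GFP_NOFS scope */
	effective = current_gfp_context(GFP_KERNEL);	/* __GFP_FS is masked off here */
	memalloc_nofs_restore(nofs_flags);		/* end the scope */
	return effective;
}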

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
static inline unsigned int memalloc_nocma_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

	current->flags |= PF_MEMALLOC_NOCMA;
	return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
	return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
/**
 * memalloc_use_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function is not nesting safe.
 */
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
	WARN_ON_ONCE(current->active_memcg);
	current->active_memcg = memcg;
}

/**
 * memalloc_unuse_memcg - Ends the remote memcg charging scope.
 *
 * This function marks the end of the remote memcg charging scope started by
 * memalloc_use_memcg().
 */
static inline void memalloc_unuse_memcg(void)
{
	current->active_memcg = NULL;
}
#else
static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
{
}

static inline void memalloc_unuse_memcg(void)
{
}
#endif

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */
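
/*
 * Example (illustrative sketch added by the editor, not part of the
 * original header): the remote memcg charging scope is used in the
 * same bracketed style as the memalloc_*_save()/restore() scopes
 * above, but it does not nest:
 *
 *	memalloc_use_memcg(memcg);
 *	ptr = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);  // charged to @memcg
 *	memalloc_unuse_memcg();
 *
 * The kmalloc() call here is purely for illustration; any __GFP_ACCOUNT
 * allocation inside the scope is charged to the given memcg.
 */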