Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: convert core mm to mm_flags_*() accessors

As part of the effort to move to mm->flags becoming a bitmap field,
convert existing users to making use of the mm_flags_*() accessors which
will, when the conversion is complete, be the only means of accessing
mm_struct flags.

This will result in the debug output being that of a bitmap output, which
will result in a minor change here, but since this is for debug only, this
should have no bearing.

Otherwise, no functional changes intended.

[akpm@linux-foundation.org: fix typo in comment]
Link: https://lkml.kernel.org/r/1eb2266f4408798a55bda00cb04545a3203aa572.1755012943.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Barry Song <baohua@kernel.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Nico Pache <npache@redhat.com>
Cc: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Lorenzo Stoakes and committed by
Andrew Morton
12e423ba bb6525f2

+63 -61
+1 -1
include/linux/huge_mm.h
··· 327 327 * example, s390 kvm. 328 328 */ 329 329 return (vm_flags & VM_NOHUGEPAGE) || 330 - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags); 330 + mm_flags_test(MMF_DISABLE_THP, vma->vm_mm); 331 331 } 332 332 333 333 static inline bool thp_disabled_by_hw(void)
+4 -2
include/linux/khugepaged.h
··· 2 2 #ifndef _LINUX_KHUGEPAGED_H 3 3 #define _LINUX_KHUGEPAGED_H 4 4 5 + #include <linux/mm.h> 6 + 5 7 extern unsigned int khugepaged_max_ptes_none __read_mostly; 6 8 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 7 9 extern struct attribute_group khugepaged_attr_group; ··· 22 20 23 21 static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) 24 22 { 25 - if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags)) 23 + if (mm_flags_test(MMF_VM_HUGEPAGE, oldmm)) 26 24 __khugepaged_enter(mm); 27 25 } 28 26 29 27 static inline void khugepaged_exit(struct mm_struct *mm) 30 28 { 31 - if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) 29 + if (mm_flags_test(MMF_VM_HUGEPAGE, mm)) 32 30 __khugepaged_exit(mm); 33 31 } 34 32 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
+3 -3
include/linux/ksm.h
··· 56 56 static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) 57 57 { 58 58 /* Adding mm to ksm is best effort on fork. */ 59 - if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) 59 + if (mm_flags_test(MMF_VM_MERGEABLE, oldmm)) 60 60 __ksm_enter(mm); 61 61 } 62 62 63 63 static inline int ksm_execve(struct mm_struct *mm) 64 64 { 65 - if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 65 + if (mm_flags_test(MMF_VM_MERGE_ANY, mm)) 66 66 return __ksm_enter(mm); 67 67 68 68 return 0; ··· 70 70 71 71 static inline void ksm_exit(struct mm_struct *mm) 72 72 { 73 - if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) 73 + if (mm_flags_test(MMF_VM_MERGEABLE, mm)) 74 74 __ksm_exit(mm); 75 75 } 76 76
+1 -1
include/linux/mm.h
··· 1949 1949 { 1950 1950 VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1)); 1951 1951 1952 - if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) 1952 + if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm)) 1953 1953 return false; 1954 1954 1955 1955 return folio_maybe_dma_pinned(folio);
+1 -1
include/linux/mman.h
··· 201 201 static inline bool map_deny_write_exec(unsigned long old, unsigned long new) 202 202 { 203 203 /* If MDWE is disabled, we have nothing to deny. */ 204 - if (!test_bit(MMF_HAS_MDWE, &current->mm->flags)) 204 + if (!mm_flags_test(MMF_HAS_MDWE, current->mm)) 205 205 return false; 206 206 207 207 /* If the new VMA is not executable, we have nothing to deny. */
+1 -1
include/linux/oom.h
··· 91 91 */ 92 92 static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) 93 93 { 94 - if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags))) 94 + if (unlikely(mm_flags_test(MMF_UNSTABLE, mm))) 95 95 return VM_FAULT_SIGBUS; 96 96 return 0; 97 97 }
+2 -2
mm/debug.c
··· 182 182 "start_code %lx end_code %lx start_data %lx end_data %lx\n" 183 183 "start_brk %lx brk %lx start_stack %lx\n" 184 184 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" 185 - "binfmt %px flags %lx\n" 185 + "binfmt %px flags %*pb\n" 186 186 #ifdef CONFIG_AIO 187 187 "ioctx_table %px\n" 188 188 #endif ··· 211 211 mm->start_code, mm->end_code, mm->start_data, mm->end_data, 212 212 mm->start_brk, mm->brk, mm->start_stack, 213 213 mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, 214 - mm->binfmt, mm->flags, 214 + mm->binfmt, NUM_MM_FLAG_BITS, __mm_flags_get_bitmap(mm), 215 215 #ifdef CONFIG_AIO 216 216 mm->ioctx_table, 217 217 #endif
+5 -5
mm/gup.c
··· 475 475 * lifecycle. Avoid setting the bit unless necessary, or it might cause write 476 476 * cache bouncing on large SMP machines for concurrent pinned gups. 477 477 */ 478 - static inline void mm_set_has_pinned_flag(unsigned long *mm_flags) 478 + static inline void mm_set_has_pinned_flag(struct mm_struct *mm) 479 479 { 480 - if (!test_bit(MMF_HAS_PINNED, mm_flags)) 481 - set_bit(MMF_HAS_PINNED, mm_flags); 480 + if (!mm_flags_test(MMF_HAS_PINNED, mm)) 481 + mm_flags_set(MMF_HAS_PINNED, mm); 482 482 } 483 483 484 484 #ifdef CONFIG_MMU ··· 1693 1693 mmap_assert_locked(mm); 1694 1694 1695 1695 if (flags & FOLL_PIN) 1696 - mm_set_has_pinned_flag(&mm->flags); 1696 + mm_set_has_pinned_flag(mm); 1697 1697 1698 1698 /* 1699 1699 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior ··· 3210 3210 return -EINVAL; 3211 3211 3212 3212 if (gup_flags & FOLL_PIN) 3213 - mm_set_has_pinned_flag(&current->mm->flags); 3213 + mm_set_has_pinned_flag(current->mm); 3214 3214 3215 3215 if (!(gup_flags & FOLL_FAST_ONLY)) 3216 3216 might_lock_read(&current->mm->mmap_lock);
+4 -4
mm/huge_memory.c
··· 251 251 if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO)) 252 252 return huge_zero_folio; 253 253 254 - if (test_bit(MMF_HUGE_ZERO_FOLIO, &mm->flags)) 254 + if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm)) 255 255 return READ_ONCE(huge_zero_folio); 256 256 257 257 if (!get_huge_zero_folio()) 258 258 return NULL; 259 259 260 - if (test_and_set_bit(MMF_HUGE_ZERO_FOLIO, &mm->flags)) 260 + if (mm_flags_test_and_set(MMF_HUGE_ZERO_FOLIO, mm)) 261 261 put_huge_zero_folio(); 262 262 263 263 return READ_ONCE(huge_zero_folio); ··· 268 268 if (IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO)) 269 269 return; 270 270 271 - if (test_bit(MMF_HUGE_ZERO_FOLIO, &mm->flags)) 271 + if (mm_flags_test(MMF_HUGE_ZERO_FOLIO, mm)) 272 272 put_huge_zero_folio(); 273 273 } 274 274 ··· 1145 1145 1146 1146 off_sub = (off - ret) & (size - 1); 1147 1147 1148 - if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub) 1148 + if (mm_flags_test(MMF_TOPDOWN, current->mm) && !off_sub) 1149 1149 return ret + size; 1150 1150 1151 1151 ret += off_sub;
+5 -5
mm/khugepaged.c
··· 410 410 static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm) 411 411 { 412 412 return hpage_collapse_test_exit(mm) || 413 - test_bit(MMF_DISABLE_THP, &mm->flags); 413 + mm_flags_test(MMF_DISABLE_THP, mm); 414 414 } 415 415 416 416 static bool hugepage_pmd_enabled(void) ··· 445 445 446 446 /* __khugepaged_exit() must not run from under us */ 447 447 VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm); 448 - if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) 448 + if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm))) 449 449 return; 450 450 451 451 mm_slot = mm_slot_alloc(mm_slot_cache); ··· 472 472 void khugepaged_enter_vma(struct vm_area_struct *vma, 473 473 vm_flags_t vm_flags) 474 474 { 475 - if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && 475 + if (!mm_flags_test(MMF_VM_HUGEPAGE, vma->vm_mm) && 476 476 hugepage_pmd_enabled()) { 477 477 if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS, 478 478 PMD_ORDER)) ··· 497 497 spin_unlock(&khugepaged_mm_lock); 498 498 499 499 if (free) { 500 - clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 500 + mm_flags_clear(MMF_VM_HUGEPAGE, mm); 501 501 mm_slot_free(mm_slot_cache, mm_slot); 502 502 mmdrop(mm); 503 503 } else if (mm_slot) { ··· 1459 1459 /* 1460 1460 * Not strictly needed because the mm exited already. 1461 1461 * 1462 - * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); 1462 + * mm_flags_clear(MMF_VM_HUGEPAGE, mm); 1463 1463 */ 1464 1464 1465 1465 /* khugepaged_mm_lock actually not necessary for the below */
+16 -16
mm/ksm.c
··· 1217 1217 spin_unlock(&ksm_mmlist_lock); 1218 1218 1219 1219 mm_slot_free(mm_slot_cache, mm_slot); 1220 - clear_bit(MMF_VM_MERGEABLE, &mm->flags); 1221 - clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 1220 + mm_flags_clear(MMF_VM_MERGEABLE, mm); 1221 + mm_flags_clear(MMF_VM_MERGE_ANY, mm); 1222 1222 mmdrop(mm); 1223 1223 } else 1224 1224 spin_unlock(&ksm_mmlist_lock); ··· 2620 2620 spin_unlock(&ksm_mmlist_lock); 2621 2621 2622 2622 mm_slot_free(mm_slot_cache, mm_slot); 2623 - clear_bit(MMF_VM_MERGEABLE, &mm->flags); 2624 - clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 2623 + mm_flags_clear(MMF_VM_MERGEABLE, mm); 2624 + mm_flags_clear(MMF_VM_MERGE_ANY, mm); 2625 2625 mmap_read_unlock(mm); 2626 2626 mmdrop(mm); 2627 2627 } else { ··· 2742 2742 vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file, 2743 2743 vm_flags_t vm_flags) 2744 2744 { 2745 - if (test_bit(MMF_VM_MERGE_ANY, &mm->flags) && 2745 + if (mm_flags_test(MMF_VM_MERGE_ANY, mm) && 2746 2746 __ksm_should_add_vma(file, vm_flags)) 2747 2747 vm_flags |= VM_MERGEABLE; 2748 2748 ··· 2784 2784 { 2785 2785 int err; 2786 2786 2787 - if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2787 + if (mm_flags_test(MMF_VM_MERGE_ANY, mm)) 2788 2788 return 0; 2789 2789 2790 - if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 2790 + if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) { 2791 2791 err = __ksm_enter(mm); 2792 2792 if (err) 2793 2793 return err; 2794 2794 } 2795 2795 2796 - set_bit(MMF_VM_MERGE_ANY, &mm->flags); 2796 + mm_flags_set(MMF_VM_MERGE_ANY, mm); 2797 2797 ksm_add_vmas(mm); 2798 2798 2799 2799 return 0; ··· 2815 2815 { 2816 2816 int err; 2817 2817 2818 - if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2818 + if (!mm_flags_test(MMF_VM_MERGE_ANY, mm)) 2819 2819 return 0; 2820 2820 2821 2821 err = ksm_del_vmas(mm); ··· 2824 2824 return err; 2825 2825 } 2826 2826 2827 - clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 2827 + mm_flags_clear(MMF_VM_MERGE_ANY, mm); 2828 2828 return 0; 2829 2829 } 2830 2830 ··· 2832 2832 { 2833 
2833 mmap_assert_write_locked(mm); 2834 2834 2835 - if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) 2835 + if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) 2836 2836 return 0; 2837 - if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) 2837 + if (mm_flags_test(MMF_VM_MERGE_ANY, mm)) 2838 2838 return ksm_disable_merge_any(mm); 2839 2839 return ksm_del_vmas(mm); 2840 2840 } ··· 2852 2852 if (!vma_ksm_compatible(vma)) 2853 2853 return 0; 2854 2854 2855 - if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 2855 + if (!mm_flags_test(MMF_VM_MERGEABLE, mm)) { 2856 2856 err = __ksm_enter(mm); 2857 2857 if (err) 2858 2858 return err; ··· 2912 2912 list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); 2913 2913 spin_unlock(&ksm_mmlist_lock); 2914 2914 2915 - set_bit(MMF_VM_MERGEABLE, &mm->flags); 2915 + mm_flags_set(MMF_VM_MERGEABLE, mm); 2916 2916 mmgrab(mm); 2917 2917 2918 2918 if (needs_wakeup) ··· 2954 2954 2955 2955 if (easy_to_free) { 2956 2956 mm_slot_free(mm_slot_cache, mm_slot); 2957 - clear_bit(MMF_VM_MERGE_ANY, &mm->flags); 2958 - clear_bit(MMF_VM_MERGEABLE, &mm->flags); 2957 + mm_flags_clear(MMF_VM_MERGE_ANY, mm); 2958 + mm_flags_clear(MMF_VM_MERGEABLE, mm); 2959 2959 mmdrop(mm); 2960 2960 } else if (mm_slot) { 2961 2961 mmap_write_lock(mm);
+4 -4
mm/mmap.c
··· 802 802 unsigned long pgoff, unsigned long flags, 803 803 vm_flags_t vm_flags) 804 804 { 805 - if (test_bit(MMF_TOPDOWN, &mm->flags)) 805 + if (mm_flags_test(MMF_TOPDOWN, mm)) 806 806 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, 807 807 flags, vm_flags); 808 808 return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); ··· 1284 1284 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper 1285 1285 * because the memory has been already freed. 1286 1286 */ 1287 - set_bit(MMF_OOM_SKIP, &mm->flags); 1287 + mm_flags_set(MMF_OOM_SKIP, mm); 1288 1288 mmap_write_lock(mm); 1289 1289 mt_clear_in_rcu(&mm->mm_mt); 1290 1290 vma_iter_set(&vmi, vma->vm_end); ··· 1859 1859 mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1); 1860 1860 mas_store(&vmi.mas, XA_ZERO_ENTRY); 1861 1861 /* Avoid OOM iterating a broken tree */ 1862 - set_bit(MMF_OOM_SKIP, &mm->flags); 1862 + mm_flags_set(MMF_OOM_SKIP, mm); 1863 1863 } 1864 1864 /* 1865 1865 * The mm_struct is going to exit, but the locks will be dropped 1866 1866 * first. Set the mm_struct as unstable is advisable as it is 1867 1867 * not fully initialised. 1868 1868 */ 1869 - set_bit(MMF_UNSTABLE, &mm->flags); 1869 + mm_flags_set(MMF_UNSTABLE, mm); 1870 1870 } 1871 1871 out: 1872 1872 mmap_write_unlock(mm);
+13 -13
mm/oom_kill.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 3 * linux/mm/oom_kill.c 4 - * 4 + * 5 5 * Copyright (C) 1998,2000 Rik van Riel 6 6 * Thanks go out to Claus Fischer for some serious inspiration and 7 7 * for goading me into coding this file... ··· 218 218 */ 219 219 adj = (long)p->signal->oom_score_adj; 220 220 if (adj == OOM_SCORE_ADJ_MIN || 221 - test_bit(MMF_OOM_SKIP, &p->mm->flags) || 221 + mm_flags_test(MMF_OOM_SKIP, p->mm) || 222 222 in_vfork(p)) { 223 223 task_unlock(p); 224 224 return LONG_MIN; ··· 325 325 * any memory is quite low. 326 326 */ 327 327 if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { 328 - if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) 328 + if (mm_flags_test(MMF_OOM_SKIP, task->signal->oom_mm)) 329 329 goto next; 330 330 goto abort; 331 331 } ··· 524 524 * should imply barriers already and the reader would hit a page fault 525 525 * if it stumbled over a reaped memory. 526 526 */ 527 - set_bit(MMF_UNSTABLE, &mm->flags); 527 + mm_flags_set(MMF_UNSTABLE, mm); 528 528 529 529 for_each_vma(vmi, vma) { 530 530 if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP)) ··· 583 583 * under mmap_lock for reading because it serializes against the 584 584 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap(). 585 585 */ 586 - if (test_bit(MMF_OOM_SKIP, &mm->flags)) { 586 + if (mm_flags_test(MMF_OOM_SKIP, mm)) { 587 587 trace_skip_task_reaping(tsk->pid); 588 588 goto out_unlock; 589 589 } ··· 619 619 schedule_timeout_idle(HZ/10); 620 620 621 621 if (attempts <= MAX_OOM_REAP_RETRIES || 622 - test_bit(MMF_OOM_SKIP, &mm->flags)) 622 + mm_flags_test(MMF_OOM_SKIP, mm)) 623 623 goto done; 624 624 625 625 pr_info("oom_reaper: unable to reap pid:%d (%s)\n", ··· 634 634 * Hide this mm from OOM killer because it has been either reaped or 635 635 * somebody can't call mmap_write_unlock(mm). 
636 636 */ 637 - set_bit(MMF_OOM_SKIP, &mm->flags); 637 + mm_flags_set(MMF_OOM_SKIP, mm); 638 638 639 639 /* Drop a reference taken by queue_oom_reaper */ 640 640 put_task_struct(tsk); ··· 670 670 unsigned long flags; 671 671 672 672 /* The victim managed to terminate on its own - see exit_mmap */ 673 - if (test_bit(MMF_OOM_SKIP, &mm->flags)) { 673 + if (mm_flags_test(MMF_OOM_SKIP, mm)) { 674 674 put_task_struct(tsk); 675 675 return; 676 676 } ··· 695 695 static void queue_oom_reaper(struct task_struct *tsk) 696 696 { 697 697 /* mm is already queued? */ 698 - if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) 698 + if (mm_flags_test_and_set(MMF_OOM_REAP_QUEUED, tsk->signal->oom_mm)) 699 699 return; 700 700 701 701 get_task_struct(tsk); ··· 892 892 * This task has already been drained by the oom reaper so there are 893 893 * only small chances it will free some more 894 894 */ 895 - if (test_bit(MMF_OOM_SKIP, &mm->flags)) 895 + if (mm_flags_test(MMF_OOM_SKIP, mm)) 896 896 return false; 897 897 898 898 if (atomic_read(&mm->mm_users) <= 1) ··· 977 977 continue; 978 978 if (is_global_init(p)) { 979 979 can_oom_reap = false; 980 - set_bit(MMF_OOM_SKIP, &mm->flags); 980 + mm_flags_set(MMF_OOM_SKIP, mm); 981 981 pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n", 982 982 task_pid_nr(victim), victim->comm, 983 983 task_pid_nr(p), p->comm); ··· 1235 1235 reap = true; 1236 1236 else { 1237 1237 /* Error only if the work has not been done already */ 1238 - if (!test_bit(MMF_OOM_SKIP, &mm->flags)) 1238 + if (!mm_flags_test(MMF_OOM_SKIP, mm)) 1239 1239 ret = -EINVAL; 1240 1240 } 1241 1241 task_unlock(p); ··· 1251 1251 * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure 1252 1252 * possible change in exit_mmap is seen 1253 1253 */ 1254 - if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm)) 1254 + if (!mm_flags_test(MMF_OOM_SKIP, mm) && !__oom_reap_task_mm(mm)) 1255 1255 ret = -EAGAIN; 1256 1256 mmap_read_unlock(mm);
1257
+3 -3
mm/util.c
··· 471 471 472 472 if (mmap_is_legacy(rlim_stack)) { 473 473 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; 474 - clear_bit(MMF_TOPDOWN, &mm->flags); 474 + mm_flags_clear(MMF_TOPDOWN, mm); 475 475 } else { 476 476 mm->mmap_base = mmap_base(random_factor, rlim_stack); 477 - set_bit(MMF_TOPDOWN, &mm->flags); 477 + mm_flags_set(MMF_TOPDOWN, mm); 478 478 } 479 479 } 480 480 #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) 481 481 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) 482 482 { 483 483 mm->mmap_base = TASK_UNMAPPED_BASE; 484 - clear_bit(MMF_TOPDOWN, &mm->flags); 484 + mm_flags_clear(MMF_TOPDOWN, mm); 485 485 } 486 486 #endif 487 487 #ifdef CONFIG_MMU