include/linux/ksm.h at v6.4
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	int ret;

	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
		ret = __ksm_enter(mm);
		if (ret)
			return ret;
	}

	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
		set_bit(MMF_VM_MERGE_ANY, &mm->flags);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#ifdef CONFIG_MEMORY_FAILURE
void collect_procs_ksm(struct page *page, struct list_head *to_kill,
		       int force_early);
#endif

#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *);
#endif /* CONFIG_PROC_FS */

#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
				     struct list_head *to_kill, int force_early)
{
}
#endif

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */
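
For context, here is a minimal userspace sketch (not part of the header) showing how the CONFIG_KSM entry points above are reached: madvise(MADV_MERGEABLE) on a mapping is routed to ksm_madvise(), and the process-wide prctl(PR_SET_MEMORY_MERGE, 1) interface added in v6.4 is routed to ksm_enable_merge_any(). The fallback #define values are assumptions for building against pre-6.4 userspace headers; nothing is merged unless ksmd is running (/sys/kernel/mm/ksm/run set to 1), and in v6.4 the prctl path additionally requires CAP_SYS_RESOURCE.

/* ksm_demo.c: opt a mapping (and then the whole process) into KSM merging. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12		/* asm-generic/mman-common.h */
#endif
#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67		/* linux/prctl.h as of v6.4 */
#endif

int main(void)
{
	size_t len = 64 * (size_t)sysconf(_SC_PAGESIZE);

	/* Anonymous, page-aligned region: the granularity KSM works at. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* Fill with identical pages so ksmd has something to merge. */
	memset(buf, 0x5a, len);

	/* Per-VMA opt-in: handled in the kernel by ksm_madvise(). */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/* Process-wide opt-in (v6.4+): handled by ksm_enable_merge_any(). */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("prctl(PR_SET_MEMORY_MERGE)");

	pause();	/* keep the mapping alive while ksmd scans */
	return 0;
}

On the kernel side, ksm_fork() above is invoked from the fork path in kernel/fork.c and ksm_exit() from the final mmput(), so the MMF_VM_MERGEABLE/MMF_VM_MERGE_ANY state follows the mm's lifetime automatically; merging progress can be observed via /sys/kernel/mm/ksm/pages_sharing, or per process in /proc/<pid>/ksm_merging_pages.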