include/linux/ksm.h at v6.17
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, vm_flags_t *vm_flags);
vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
			 vm_flags_t vm_flags);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern atomic_long_t ksm_zero_pages;

static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* Adding mm to ksm is best effort on fork. */
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		__ksm_enter(mm);
}

static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(const struct folio *folio, const struct page *page,
		       struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
bool ksm_process_mergeable(struct mm_struct *mm);

#else /* !CONFIG_KSM */

static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
		const struct file *file, vm_flags_t vm_flags)
{
	return vm_flags;
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, vm_flags_t *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */
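For context, not part of the header itself: userspace reaches ksm_madvise() above through the madvise(2) system call with MADV_MERGEABLE, which marks a VMA as VM_MERGEABLE so ksmd will scan it, and since Linux 6.4 prctl(PR_SET_MEMORY_MERGE, 1, ...) opts an entire process in via ksm_enable_merge_any(). A minimal userspace sketch (assumes CONFIG_KSM=y and, for the prctl, kernel headers and kernel >= 6.4):

	#include <stdlib.h>
	#include <sys/mman.h>
	#include <sys/prctl.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 16 * 4096;

		/* Private anonymous memory is what KSM can merge. */
		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED)
			return 1;

		/* Per-VMA opt-in: lands in ksm_madvise(), sets VM_MERGEABLE. */
		if (madvise(buf, len, MADV_MERGEABLE))
			return 1;

		/* Whole-process opt-in (Linux >= 6.4): sets MMF_VM_MERGE_ANY. */
		prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);

		return 0;
	}

Merging only actually happens once ksmd is running, i.e. after writing 1 to /sys/kernel/mm/ksm/run.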