/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, vm_flags_t *vm_flags);
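
/*
 * ksm_madvise() backs madvise(MADV_MERGEABLE) and madvise(MADV_UNMERGEABLE)
 * for a single VMA. A minimal userspace sketch of opting a mapping in to
 * KSM scanning (illustrative only; len is assumed to be page-aligned):
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 *
 * ksmd only considers pages in areas that were opted in this way (or via
 * the process-wide prctl() noted below).
 */
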
vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
			 vm_flags_t vm_flags);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
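
/*
 * ksm_enable_merge_any() sets MMF_VM_MERGE_ANY so that all current and
 * future compatible VMAs of the process become merge-eligible; it is
 * reached from prctl(PR_SET_MEMORY_MERGE). An illustrative userspace
 * sketch (assumes a kernel built with CONFIG_KSM=y):
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
 *		perror("prctl");
 *
 * Passing 0 as the second argument opts the process back out, which ends
 * up in ksm_disable_merge_any().
 */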

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
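
/*
 * A sketch of how a page-table walker might use the test above (assumes
 * the pte was read under the page table lock, e.g. via ptep_get(), and
 * nr_ksm_zero is a hypothetical local counter):
 *
 *	pte_t pte = ptep_get(ptep);
 *
 *	if (is_ksm_zero_pte(pte))
 *		nr_ksm_zero++;
 *
 * A clean (non-dirty) zeropage mapping, by contrast, is the ordinary
 * result of a read fault on anonymous memory, not of KSM deduplication.
 */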

extern atomic_long_t ksm_zero_pages;

static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}
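
/*
 * The global and per-mm counters above move in lock-step: KSM bumps both
 * via ksm_map_zero_page() when it installs a shared zeropage, and every
 * teardown path that might remove one calls ksm_might_unmap_zero_page().
 * The per-mm value is what /proc/<pid>/ksm_stat reports, roughly:
 *
 *	seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
 */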

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* Adding mm to ksm is best effort on fork. */
	if (mm_flags_test(MMF_VM_MERGEABLE, oldmm)) {
		long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);

		mm->ksm_merging_pages = 0;
		mm->ksm_rmap_items = 0;
		atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
		__ksm_enter(mm);
	}
}
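
/*
 * Note on the accounting in ksm_fork(): the child's mm_struct starts as a
 * copy of the parent's, so mm->ksm_zero_pages already holds the number of
 * KSM-placed zeropages inherited through the copied page tables; only the
 * global ksm_zero_pages counter still needs to be topped up by that amount.
 */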

static inline int ksm_execve(struct mm_struct *mm)
{
	if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (mm_flags_test(MMF_VM_MERGEABLE, mm))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);
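
/*
 * A simplified sketch of the expected caller pattern in the swap-in path;
 * the function returns the original folio, a fresh copy, NULL when the
 * copy could not be allocated, or an ERR_PTR() for a poisoned source page:
 *
 *	folio = ksm_might_need_to_copy(folio, vma, addr);
 *	if (unlikely(!folio))
 *		goto out_nomem;
 *	if (IS_ERR(folio))
 *		goto out_error;
 */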

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(const struct folio *folio, const struct page *page,
		struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
bool ksm_process_mergeable(struct mm_struct *mm);
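
/*
 * ksm_process_profit() is the per-process form of the "general profit"
 * estimate described in Documentation/admin-guide/mm/ksm.rst; in mm/ksm.c
 * it is computed along the lines of:
 *
 *	(ksm_merging_pages + ksm_zero_pages) * PAGE_SIZE
 *		- ksm_rmap_items * sizeof(struct ksm_rmap_item)
 */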

#else /* !CONFIG_KSM */

static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm,
		const struct file *file, vm_flags_t vm_flags)
{
	return vm_flags;
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, vm_flags_t *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
		struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */