/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

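/*
 * ksm_madvise() backs madvise(MADV_MERGEABLE) and madvise(MADV_UNMERGEABLE)
 * for a single VMA. Illustrative userspace sketch, not part of this header
 * (assumes an anonymous private mapping of @len bytes; madvise() fails with
 * EINVAL on kernels built without CONFIG_KSM):
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED && madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise(MADV_MERGEABLE)");
 *
 * Identical pages in the advised range are only merged once ksmd has
 * scanned them, i.e. while /sys/kernel/mm/ksm/run is set to 1.
 */
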
void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

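/*
 * ksm_enable_merge_any() and ksm_disable_merge_any() back the
 * prctl(PR_SET_MEMORY_MERGE) interface (Linux 6.4+), which opts all
 * current and future anonymous VMAs of a process into KSM instead of
 * advising one range at a time. Illustrative sketch, not part of this
 * header:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
 *		perror("PR_SET_MEMORY_MERGE");
 *
 * ksm_disable() switches KSM off for a whole mm again; callers that
 * cannot tolerate KSM-merged pages use it to back out of merging.
 */
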
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern atomic_long_t ksm_zero_pages;

static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}
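
/*
 * Sketch of the intended calling convention for the helpers above
 * (illustrative and simplified; the real callers are the page-table
 * teardown paths under mm/). Whenever a PTE that may hold a KSM-placed
 * zeropage is cleared, the counters must be rebalanced:
 *
 *	pte_t ptent = ptep_get_and_clear(mm, addr, ptep);
 *
 *	ksm_might_unmap_zero_page(mm, ptent);
 *
 * Only the dirty bit distinguishes a KSM-placed zeropage from an
 * ordinary read-faulted one, so is_ksm_zero_pte() keeps the helper from
 * decrementing the counters for a clean zeropage mapping.
 */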

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}

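/*
 * mm_ksm_zero_pages() is the read-side accessor for the per-mm counter
 * maintained above; userspace reads it from /proc/<pid>/ksm_stat.
 * Illustrative sketch of such a consumer (modelled on fs/proc, simplified):
 *
 *	seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
 */
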
static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* Adding mm to ksm is best effort on fork. */
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		__ksm_enter(mm);
}

static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

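/*
 * Lifecycle wiring, for orientation (exact call sites vary by kernel
 * version): ksm_fork() runs while duplicating an mm on fork() so the
 * child keeps participating in KSM; ksm_execve() re-enters KSM for the
 * fresh mm created by execve() when MMF_VM_MERGE_ANY was inherited; and
 * ksm_exit() tears down KSM state once the mm is finally released.
 */
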
/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr);

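/*
 * Illustrative sketch of the swap-in call site described above
 * (simplified; the real logic lives in do_swap_page() in mm/memory.c).
 * The caller must be prepared for a fresh copy, an allocation failure,
 * or a poisoned source page:
 *
 *	folio = ksm_might_need_to_copy(folio, vma, vmf->address);
 *	if (unlikely(!folio))
 *		ret = VM_FAULT_OOM;
 *	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
 *		ret = VM_FAULT_HWPOISON;
 */
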
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(const struct folio *folio, const struct page *page,
		       struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);

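/*
 * ksm_process_profit() estimates the net memory saving KSM achieves for
 * one process, surfaced via /proc/<pid>/ksm_stat. Roughly (see
 * Documentation/admin-guide/mm/ksm.rst; exact bookkeeping may differ by
 * version):
 *
 *	profit = (ksm_merging_pages + ksm_zero_pages) * PAGE_SIZE
 *		 - ksm_rmap_items * sizeof(struct ksm_rmap_item)
 *
 * i.e. pages saved by sharing minus the metadata KSM spends tracking them.
 */
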
#else /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
		struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */