/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma);
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a PMD-sized pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a PMD-sized pfn using the VMA's vm_page_prot. See vmf_insert_pfn()
 * for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a PUD-sized pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a PUD-sized pfn using the VMA's vm_page_prot. See vmf_insert_pfn()
 * for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

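/*
 * Bits of transparent_hugepage_flags; most of them are controlled from
 * user space via the sysfs files under /sys/kernel/mm/transparent_hugepage/.
 */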
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

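/*
 * HPAGE_PMD_ORDER/NR may be defined before HPAGE_PMD_SHIFT: macros expand
 * lazily, so HPAGE_PMD_SHIFT only needs to resolve (or BUILD_BUG()) at the
 * point of use.
 */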
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on VMAs which are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/*
	 * Bail out if the hardware/firmware has marked hugepage support
	 * disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

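/*
 * Check whether the huge-aligned address @haddr can be backed by a PMD-sized
 * page in @vma: the range must lie inside the VMA and, for file-backed VMAs,
 * the file offset must be aligned so a huge page cache page can map here.
 */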
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
				(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

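/*
 * Split the PMD only when it actually maps something huge: a THP, a devmap
 * entry, or a PMD-level swap/migration entry. A normal page-table PMD is
 * left untouched.
 */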
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

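/* As above, but for PUD-mapped huge pages and devmap entries. */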
#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

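/* A PMD that is neither none nor present holds a swap/migration entry. */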
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
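/* PUD counterpart of pmd_trans_huge_lock(); mmap_lock must be held here too. */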
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
	return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_ORDER;
	return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

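/*
 * Helpers for the shared huge zero page, which backs anonymous read faults
 * when the use_zero_page flag is enabled.
 */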
static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

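/* Build a huge (PMD-level) page table entry mapping @page with protection @prot. */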
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

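/* True if the architecture can migrate THPs without splitting them first. */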
static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The deferred split list (global or per-memcg) lives in the second
	 * tail page; the first word of that page is occupied by compound_head.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
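/*
 * !CONFIG_TRANSPARENT_HUGEPAGE: no-op stubs. The BUILD_BUG()s catch any
 * code path that would use PMD/PUD huge page sizes without THP compiled in.
 */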
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline struct page *thp_head(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return page;
}

static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 0;
}

static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
	return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
					       pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}

#endif /* _LINUX_HUGE_MM_H */