Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

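/*
 * Fault-handling and page-table copy entry points for PMD- and PUD-sized
 * transparent huge pages, implemented in mm/huge_memory.c.
 */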
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma);
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn using the vma's default page protection.
 * See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn using the vma's default page protection.
 * See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

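/*
 * Bit numbers within transparent_hugepage_flags; the "enabled" and "defrag"
 * settings exposed under /sys/kernel/mm/transparent_hugepage/ map onto
 * these flags.
 */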
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

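/* Order of a PMD-sized huge page, and the number of base pages it spans. */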
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on VMAs that are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{

	/*
	 * Bail out if the hardware/firmware has marked huge page support
	 * as disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

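/*
 * True when a PMD-sized page can be mapped at @haddr: the range must lie
 * inside the VMA and, for file-backed VMAs, the mapping's start address and
 * file offset must be congruent modulo HPAGE_PMD_SIZE.
 */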
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

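/* True when the huge zero page may be used to satisfy read faults. */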
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

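/*
 * Split the pmd at @__address in place, but only if it is currently a huge
 * entry (trans-huge, devmap or a swap/migration entry); otherwise this is
 * a no-op.
 */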
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

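/* As split_huge_pmd(), but for a PUD-sized mapping. */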
#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

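/*
 * A pmd that is neither none nor present holds a swap-style entry
 * (e.g. a THP migration entry).
 */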
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/*
 * mmap_lock must be held on entry.  Returns the pmd's page table lock,
 * already taken, when the pmd is huge (trans-huge, devmap or a
 * swap/migration entry); returns NULL otherwise.
 */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
	return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_ORDER;
	return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}

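/*
 * GUP helpers for huge device (devmap) mappings; the associated
 * dev_pagemap is returned through @pgmap.
 */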
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

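/*
 * The shared huge zero page, allocated on demand and reference-counted
 * per mm via mm_get_huge_zero_page()/mm_put_huge_zero_page().
 */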
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

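/* Build a huge (PMD-level) page table entry mapping @page with @prot. */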
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

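/*
 * True when the architecture can migrate a THP without first splitting it
 * (CONFIG_ARCH_ENABLE_THP_MIGRATION).
 */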
static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * Global or memcg deferred list in the second tail pages is
	 * occupied by compound_head.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
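/*
 * THP is compiled out: the size macros trip BUILD_BUG() if used, and the
 * remaining helpers collapse to no-op stubs.
 */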
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline struct page *thp_head(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return page;
}

static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 0;
}

static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
	return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}

#endif /* _LINUX_HUGE_MM_H */