Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                           pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
                 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
                 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
                    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_UNSUPPORTED,
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count,
                                   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf,
                                  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
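
/*
 * Worked example (illustrative only, architecture-dependent): with 4KiB base
 * pages on x86-64, PMD_ORDER is 9, so the mask above evaluates to
 * (BIT(10) - 1) & ~(BIT(0) | BIT(1)) == 0x3fc, i.e. orders 2 through 9
 * inclusive.
 */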
75
76/*
77 * Mask of all large folio orders supported for file THP. Folios in a DAX
78 * file is never split and the MAX_PAGECACHE_ORDER limit does not apply to
79 * it.
80 */
81#define THP_ORDERS_ALL_FILE_DAX \
82 (BIT(PMD_ORDER) | BIT(PUD_ORDER))
83#define THP_ORDERS_ALL_FILE_DEFAULT \
84 ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
85
86/*
87 * Mask of all large folio orders supported for THP.
88 */
89#define THP_ORDERS_ALL \
90 (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
91
92#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
93#define TVA_IN_PF (1 << 1) /* Page fault handler */
94#define TVA_ENFORCE_SYSFS (1 << 2) /* Obey sysfs configuration */
95
96#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
97 (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
98
99#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
100#define HPAGE_PMD_SHIFT PMD_SHIFT
101#define HPAGE_PUD_SHIFT PUD_SHIFT
102#else
103#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
104#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
105#endif
106
107#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
108#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
109#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
110#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
111
112#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
113#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
114#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
115#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
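
/*
 * Illustrative values (assuming x86-64 with 4KiB pages, PMD_SHIFT == 21 and
 * PUD_SHIFT == 30): HPAGE_PMD_SIZE == 2MiB, HPAGE_PMD_ORDER == 9,
 * HPAGE_PMD_NR == 512; HPAGE_PUD_SIZE == 1GiB, HPAGE_PUD_ORDER == 18,
 * HPAGE_PUD_NR == 262144. Other architectures and page sizes yield
 * different values.
 */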

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
        return transparent_hugepage_flags &
                        ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
                         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
        return transparent_hugepage_flags &
                        (1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline int highest_order(unsigned long orders)
{
        return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
        *orders &= ~BIT(prev);
        return highest_order(*orders);
}
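
/*
 * Typical descending walk over an order bitfield using the two helpers
 * above (illustrative sketch; "orders" is any bitfield of candidate orders):
 *
 *	int order;
 *
 *	for (order = highest_order(orders); orders;
 *	     order = next_order(&orders, order)) {
 *		/. ... attempt the operation at "order" ... ./
 *	}
 *
 * (Nested comment markers above are written with dots to keep this block a
 * single C comment.)
 */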

/*
 * Perform the following checks:
 *   - For file vma, check if the linear page offset of vma is
 *     order-aligned within the file.  The hugepage is
 *     guaranteed to be order-aligned within the file, but we must
 *     check that the order-aligned addresses in the VMA map to
 *     order-aligned offsets within the file, else the hugepage will
 *     not be mappable.
 *   - For all vmas, check if the haddr is in an aligned hugepage
 *     area.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
                unsigned long addr, int order)
{
        unsigned long hpage_size = PAGE_SIZE << order;
        unsigned long haddr;

        /* Don't have to check pgoff for anonymous vma */
        if (!vma_is_anonymous(vma)) {
                if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
                                hpage_size >> PAGE_SHIFT))
                        return false;
        }

        haddr = ALIGN_DOWN(addr, hpage_size);

        if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
                return false;
        return true;
}
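
/*
 * Example (illustrative): for order 9 with 4KiB pages, hpage_size is 2MiB.
 * A file-backed VMA is only suitable if (vm_start >> PAGE_SHIFT) - vm_pgoff
 * is a multiple of 512, i.e. 2MiB-aligned addresses map to 2MiB-aligned file
 * offsets, and the 2MiB-aligned region containing "addr" must lie entirely
 * inside [vm_start, vm_end).
 */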

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
                unsigned long addr, unsigned long orders)
{
        int order;

        /*
         * Iterate over orders, highest to lowest, removing orders that don't
         * meet alignment requirements from the set. Exit loop at first order
         * that meets requirements, since all lower orders must also meet
         * requirements.
         */

        order = highest_order(orders);

        while (orders) {
                if (thp_vma_suitable_order(vma, addr, order))
                        break;
                order = next_order(&orders, order);
        }

        return orders;
}

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
        struct inode *inode;

        if (!vma->vm_file)
                return false;

        inode = vma->vm_file->f_inode;

        return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
               !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
                                         unsigned long vm_flags,
                                         unsigned long tva_flags,
                                         unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma:  the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @tva_flags: Which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
                                       unsigned long vm_flags,
                                       unsigned long tva_flags,
                                       unsigned long orders)
{
        /* Optimization to check if required orders are enabled early. */
        if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
                unsigned long mask = READ_ONCE(huge_anon_orders_always);

                if (vm_flags & VM_HUGEPAGE)
                        mask |= READ_ONCE(huge_anon_orders_madvise);
                if (hugepage_global_always() ||
                    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
                        mask |= READ_ONCE(huge_anon_orders_inherit);

                orders &= mask;
                if (!orders)
                        return 0;
        }

        return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}
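
/*
 * Usage sketch (illustrative, not a definitive caller): a page-fault path
 * that wants a single PMD-sized mapping would typically ask
 *
 *	if (thp_vma_allowable_order(vma, vma->vm_flags,
 *				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER))
 *		... try a PMD-sized mapping ...
 *
 * which simply passes BIT(PMD_ORDER) as the "orders" bitfield.
 */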

struct thpsize {
        struct kobject kobj;
        struct list_head node;
        int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

enum mthp_stat_item {
        MTHP_STAT_ANON_FAULT_ALLOC,
        MTHP_STAT_ANON_FAULT_FALLBACK,
        MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
        MTHP_STAT_SWPOUT,
        MTHP_STAT_SWPOUT_FALLBACK,
        MTHP_STAT_SHMEM_ALLOC,
        MTHP_STAT_SHMEM_FALLBACK,
        MTHP_STAT_SHMEM_FALLBACK_CHARGE,
        MTHP_STAT_SPLIT,
        MTHP_STAT_SPLIT_FAILED,
        MTHP_STAT_SPLIT_DEFERRED,
        __MTHP_STAT_COUNT
};

struct mthp_stat {
        unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

#ifdef CONFIG_SYSFS
DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
        if (order <= 0 || order > PMD_ORDER)
                return;

        this_cpu_inc(mthp_stats.stats[order][item]);
}
#else
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif
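
/*
 * Example (illustrative): after allocating an anonymous large folio, a caller
 * would bump the per-order counter with something like
 *
 *	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * Orders outside (0, PMD_ORDER] are silently ignored.
 */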

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list_to_order(page, NULL, 0);
}
void deferred_split_folio(struct folio *folio);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
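
/*
 * Typical usage pattern (sketch): with mmap_lock held, take the PMD lock only
 * if the entry is a huge (or swap/devmap) PMD, and drop it when done:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge PMD ...
 *		spin_unlock(ptl);
 *	}
 */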

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmd, bool freeze, struct folio *folio);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
			   pmd_t *pmdp, struct folio *folio);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags, vm_flags_t vm_flags)
{
	return 0;
}

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmd,
					 bool freeze, struct folio *folio) {}

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp,
					 struct folio *folio)
{
	return false;
}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}

static inline int highest_order(unsigned long orders)
{
	return 0;
}

static inline int next_order(unsigned long *orders, int prev)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
#define split_folio(f) split_folio_to_order(f, 0)

#endif /* _LINUX_HUGE_MM_H */