/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn using the vma's vm_page_prot. See vmf_insert_pfn()
 * for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

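/*
 * Example (illustrative sketch only, not part of this header): a driver's
 * ->huge_fault() handler might wrap vmf_insert_pfn_pmd() like this, where
 * my_dev_pfn() is a hypothetical helper resolving the faulting address to
 * a PMD-aligned device pfn:
 *
 *	static vm_fault_t my_huge_fault(struct vm_fault *vmf,
 *					enum page_entry_size pe_size)
 *	{
 *		if (pe_size != PE_SIZE_PMD)
 *			return VM_FAULT_FALLBACK;
 *		return vmf_insert_pfn_pmd(vmf, my_dev_pfn(vmf),
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */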

vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn using the vma's vm_page_prot. See vmf_insert_pfn()
 * for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
					 unsigned long vm_flags)
{
	/* Explicitly disabled through madvise. */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	return true;
}

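/*
 * Example (illustrative sketch only): a huge fault path typically derives
 * the PMD-aligned start of the candidate mapping and checks both helpers
 * before attempting a PMD-sized fault:
 *
 *	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 *
 *	if (!transhuge_vma_enabled(vma, vma->vm_flags) ||
 *	    !transhuge_vma_suitable(vma, haddr))
 *		return VM_FAULT_FALLBACK;
 */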

/*
 * To be used on VMAs that are known to support THP.
 * Use transparent_hugepage_active() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/*
	 * Bail out if hardware/firmware has marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	if (!transhuge_vma_enabled(vma, vma->vm_flags))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
	    (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_active(struct vm_area_struct *vma);

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}

static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

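/*
 * Example (illustrative sketch only): a typical page table walker takes
 * the huge pmd lock and falls back to the pte level when the pmd is not
 * (or is no longer) huge; mmap_lock must already be held:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the now-stable huge pmd ...
 *		spin_unlock(ptl);
 *		return 0;
 *	}
 *	... otherwise walk the pte level ...
 */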

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The deferred list (global or per-memcg) lives in the second tail
	 * page; the corresponding space in the first tail page is occupied
	 * by compound metadata.
	 */
	return &page[2].deferred_list;
}

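/*
 * Example (illustrative sketch only): callers that want to map the huge
 * zero page take a reference tied to the mm and fall back to a real page
 * if none is available:
 *
 *	zero_page = mm_get_huge_zero_page(vma->vm_mm);
 *	if (!zero_page)
 *		... fall back to allocating a real huge page ...
 *
 * The matching mm_put_huge_zero_page() is called once at mm teardown,
 * not once per fault.
 */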

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long haddr)
{
	return false;
}

static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
					 unsigned long vm_flags)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

#endif /* _LINUX_HUGE_MM_H */