at v6.8 16 kB view raw
1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _LINUX_HUGE_MM_H 3#define _LINUX_HUGE_MM_H 4 5#include <linux/sched/coredump.h> 6#include <linux/mm_types.h> 7 8#include <linux/fs.h> /* only for vma_is_dax() */ 9 10vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); 11int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 12 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 13 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); 14void huge_pmd_set_accessed(struct vm_fault *vmf); 15int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 16 pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 17 struct vm_area_struct *vma); 18 19#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 20void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); 21#else 22static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 23{ 24} 25#endif 26 27vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf); 28bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 29 pmd_t *pmd, unsigned long addr, unsigned long next); 30int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, 31 unsigned long addr); 32int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, 33 unsigned long addr); 34bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 35 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd); 36int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, 37 pmd_t *pmd, unsigned long addr, pgprot_t newprot, 38 unsigned long cp_flags); 39 40vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); 41vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); 42 43enum transparent_hugepage_flag { 44 TRANSPARENT_HUGEPAGE_UNSUPPORTED, 45 TRANSPARENT_HUGEPAGE_FLAG, 46 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 47 TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, 48 TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, 49 TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, 50 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, 51 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, 52 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, 53}; 54 55struct kobject; 56struct kobj_attribute; 57 58ssize_t single_hugepage_flag_store(struct kobject *kobj, 59 struct kobj_attribute *attr, 60 const char *buf, size_t count, 61 enum transparent_hugepage_flag flag); 62ssize_t single_hugepage_flag_show(struct kobject *kobj, 63 struct kobj_attribute *attr, char *buf, 64 enum transparent_hugepage_flag flag); 65extern struct kobj_attribute shmem_enabled_attr; 66 67#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) 68#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) 69 70/* 71 * Mask of all large folio orders supported for anonymous THP; all orders up to 72 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1 73 * (which is a limitation of the THP implementation). 74 */ 75#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1))) 76 77/* 78 * Mask of all large folio orders supported for file THP. 79 */ 80#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER)) 81 82/* 83 * Mask of all large folio orders supported for THP. 84 */ 85#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE) 86 87#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \ 88 (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order))) 89 90#ifdef CONFIG_TRANSPARENT_HUGEPAGE 91#define HPAGE_PMD_SHIFT PMD_SHIFT 92#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) 93#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) 94 95#define HPAGE_PUD_SHIFT PUD_SHIFT 96#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) 97#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) 98 99extern unsigned long transparent_hugepage_flags; 100extern unsigned long huge_anon_orders_always; 101extern unsigned long huge_anon_orders_madvise; 102extern unsigned long huge_anon_orders_inherit; 103 104static inline bool hugepage_global_enabled(void) 105{ 106 return transparent_hugepage_flags & 107 ((1<<TRANSPARENT_HUGEPAGE_FLAG) | 108 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)); 109} 110 111static inline bool hugepage_global_always(void) 112{ 113 return transparent_hugepage_flags & 114 (1<<TRANSPARENT_HUGEPAGE_FLAG); 115} 116 117static inline bool hugepage_flags_enabled(void) 118{ 119 /* 120 * We cover both the anon and the file-backed case here; we must return 121 * true if globally enabled, even when all anon sizes are set to never. 122 * So we don't need to look at huge_anon_orders_inherit. 123 */ 124 return hugepage_global_enabled() || 125 huge_anon_orders_always || 126 huge_anon_orders_madvise; 127} 128 129static inline int highest_order(unsigned long orders) 130{ 131 return fls_long(orders) - 1; 132} 133 134static inline int next_order(unsigned long *orders, int prev) 135{ 136 *orders &= ~BIT(prev); 137 return highest_order(*orders); 138} 139 140/* 141 * Do the below checks: 142 * - For file vma, check if the linear page offset of vma is 143 * order-aligned within the file. The hugepage is 144 * guaranteed to be order-aligned within the file, but we must 145 * check that the order-aligned addresses in the VMA map to 146 * order-aligned offsets within the file, else the hugepage will 147 * not be mappable. 148 * - For all vmas, check if the haddr is in an aligned hugepage 149 * area. 150 */ 151static inline bool thp_vma_suitable_order(struct vm_area_struct *vma, 152 unsigned long addr, int order) 153{ 154 unsigned long hpage_size = PAGE_SIZE << order; 155 unsigned long haddr; 156 157 /* Don't have to check pgoff for anonymous vma */ 158 if (!vma_is_anonymous(vma)) { 159 if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, 160 hpage_size >> PAGE_SHIFT)) 161 return false; 162 } 163 164 haddr = ALIGN_DOWN(addr, hpage_size); 165 166 if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end) 167 return false; 168 return true; 169} 170 171/* 172 * Filter the bitfield of input orders to the ones suitable for use in the vma. 173 * See thp_vma_suitable_order(). 174 * All orders that pass the checks are returned as a bitfield. 175 */ 176static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, 177 unsigned long addr, unsigned long orders) 178{ 179 int order; 180 181 /* 182 * Iterate over orders, highest to lowest, removing orders that don't 183 * meet alignment requirements from the set. Exit loop at first order 184 * that meets requirements, since all lower orders must also meet 185 * requirements. 186 */ 187 188 order = highest_order(orders); 189 190 while (orders) { 191 if (thp_vma_suitable_order(vma, addr, order)) 192 break; 193 order = next_order(&orders, order); 194 } 195 196 return orders; 197} 198 199static inline bool file_thp_enabled(struct vm_area_struct *vma) 200{ 201 struct inode *inode; 202 203 if (!vma->vm_file) 204 return false; 205 206 inode = vma->vm_file->f_inode; 207 208 return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) && 209 !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); 210} 211 212unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, 213 unsigned long vm_flags, bool smaps, 214 bool in_pf, bool enforce_sysfs, 215 unsigned long orders); 216 217/** 218 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma 219 * @vma: the vm area to check 220 * @vm_flags: use these vm_flags instead of vma->vm_flags 221 * @smaps: whether answer will be used for smaps file 222 * @in_pf: whether answer will be used by page fault handler 223 * @enforce_sysfs: whether sysfs config should be taken into account 224 * @orders: bitfield of all orders to consider 225 * 226 * Calculates the intersection of the requested hugepage orders and the allowed 227 * hugepage orders for the provided vma. Permitted orders are encoded as a set 228 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3 229 * corresponds to order-3, etc). Order-0 is never considered a hugepage order. 230 * 231 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage 232 * orders are allowed. 233 */ 234static inline 235unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, 236 unsigned long vm_flags, bool smaps, 237 bool in_pf, bool enforce_sysfs, 238 unsigned long orders) 239{ 240 /* Optimization to check if required orders are enabled early. */ 241 if (enforce_sysfs && vma_is_anonymous(vma)) { 242 unsigned long mask = READ_ONCE(huge_anon_orders_always); 243 244 if (vm_flags & VM_HUGEPAGE) 245 mask |= READ_ONCE(huge_anon_orders_madvise); 246 if (hugepage_global_always() || 247 ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled())) 248 mask |= READ_ONCE(huge_anon_orders_inherit); 249 250 orders &= mask; 251 if (!orders) 252 return 0; 253 } 254 255 return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, 256 enforce_sysfs, orders); 257} 258 259#define transparent_hugepage_use_zero_page() \ 260 (transparent_hugepage_flags & \ 261 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) 262 263unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, 264 unsigned long len, unsigned long pgoff, unsigned long flags); 265 266void folio_prep_large_rmappable(struct folio *folio); 267bool can_split_folio(struct folio *folio, int *pextra_pins); 268int split_huge_page_to_list(struct page *page, struct list_head *list); 269static inline int split_huge_page(struct page *page) 270{ 271 return split_huge_page_to_list(page, NULL); 272} 273void deferred_split_folio(struct folio *folio); 274 275void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 276 unsigned long address, bool freeze, struct folio *folio); 277 278#define split_huge_pmd(__vma, __pmd, __address) \ 279 do { \ 280 pmd_t *____pmd = (__pmd); \ 281 if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \ 282 || pmd_devmap(*____pmd)) \ 283 __split_huge_pmd(__vma, __pmd, __address, \ 284 false, NULL); \ 285 } while (0) 286 287 288void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 289 bool freeze, struct folio *folio); 290 291void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 292 unsigned long address); 293 294#define split_huge_pud(__vma, __pud, __address) \ 295 do { \ 296 pud_t *____pud = (__pud); \ 297 if (pud_trans_huge(*____pud) \ 298 || pud_devmap(*____pud)) \ 299 __split_huge_pud(__vma, __pud, __address); \ 300 } while (0) 301 302int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, 303 int advice); 304int madvise_collapse(struct vm_area_struct *vma, 305 struct vm_area_struct **prev, 306 unsigned long start, unsigned long end); 307void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, 308 unsigned long end, long adjust_next); 309spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); 310spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma); 311 312static inline int is_swap_pmd(pmd_t pmd) 313{ 314 return !pmd_none(pmd) && !pmd_present(pmd); 315} 316 317/* mmap_lock must be held on entry */ 318static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, 319 struct vm_area_struct *vma) 320{ 321 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) 322 return __pmd_trans_huge_lock(pmd, vma); 323 else 324 return NULL; 325} 326static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, 327 struct vm_area_struct *vma) 328{ 329 if (pud_trans_huge(*pud) || pud_devmap(*pud)) 330 return __pud_trans_huge_lock(pud, vma); 331 else 332 return NULL; 333} 334 335/** 336 * folio_test_pmd_mappable - Can we map this folio with a PMD? 337 * @folio: The folio to test 338 */ 339static inline bool folio_test_pmd_mappable(struct folio *folio) 340{ 341 return folio_order(folio) >= HPAGE_PMD_ORDER; 342} 343 344struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, 345 pmd_t *pmd, int flags, struct dev_pagemap **pgmap); 346struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 347 pud_t *pud, int flags, struct dev_pagemap **pgmap); 348 349vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); 350 351extern struct page *huge_zero_page; 352extern unsigned long huge_zero_pfn; 353 354static inline bool is_huge_zero_page(struct page *page) 355{ 356 return READ_ONCE(huge_zero_page) == page; 357} 358 359static inline bool is_huge_zero_pmd(pmd_t pmd) 360{ 361 return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); 362} 363 364static inline bool is_huge_zero_pud(pud_t pud) 365{ 366 return false; 367} 368 369struct page *mm_get_huge_zero_page(struct mm_struct *mm); 370void mm_put_huge_zero_page(struct mm_struct *mm); 371 372#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) 373 374static inline bool thp_migration_supported(void) 375{ 376 return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); 377} 378 379#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 380#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) 381#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) 382#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) 383 384#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) 385#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) 386#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) 387 388static inline bool folio_test_pmd_mappable(struct folio *folio) 389{ 390 return false; 391} 392 393static inline bool thp_vma_suitable_order(struct vm_area_struct *vma, 394 unsigned long addr, int order) 395{ 396 return false; 397} 398 399static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, 400 unsigned long addr, unsigned long orders) 401{ 402 return 0; 403} 404 405static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, 406 unsigned long vm_flags, bool smaps, 407 bool in_pf, bool enforce_sysfs, 408 unsigned long orders) 409{ 410 return 0; 411} 412 413static inline void folio_prep_large_rmappable(struct folio *folio) {} 414 415#define transparent_hugepage_flags 0UL 416 417#define thp_get_unmapped_area NULL 418 419static inline bool 420can_split_folio(struct folio *folio, int *pextra_pins) 421{ 422 return false; 423} 424static inline int 425split_huge_page_to_list(struct page *page, struct list_head *list) 426{ 427 return 0; 428} 429static inline int split_huge_page(struct page *page) 430{ 431 return 0; 432} 433static inline void deferred_split_folio(struct folio *folio) {} 434#define split_huge_pmd(__vma, __pmd, __address) \ 435 do { } while (0) 436 437static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 438 unsigned long address, bool freeze, struct folio *folio) {} 439static inline void split_huge_pmd_address(struct vm_area_struct *vma, 440 unsigned long address, bool freeze, struct folio *folio) {} 441 442#define split_huge_pud(__vma, __pmd, __address) \ 443 do { } while (0) 444 445static inline int hugepage_madvise(struct vm_area_struct *vma, 446 unsigned long *vm_flags, int advice) 447{ 448 return -EINVAL; 449} 450 451static inline int madvise_collapse(struct vm_area_struct *vma, 452 struct vm_area_struct **prev, 453 unsigned long start, unsigned long end) 454{ 455 return -EINVAL; 456} 457 458static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, 459 unsigned long start, 460 unsigned long end, 461 long adjust_next) 462{ 463} 464static inline int is_swap_pmd(pmd_t pmd) 465{ 466 return 0; 467} 468static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, 469 struct vm_area_struct *vma) 470{ 471 return NULL; 472} 473static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, 474 struct vm_area_struct *vma) 475{ 476 return NULL; 477} 478 479static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) 480{ 481 return 0; 482} 483 484static inline bool is_huge_zero_page(struct page *page) 485{ 486 return false; 487} 488 489static inline bool is_huge_zero_pmd(pmd_t pmd) 490{ 491 return false; 492} 493 494static inline bool is_huge_zero_pud(pud_t pud) 495{ 496 return false; 497} 498 499static inline void mm_put_huge_zero_page(struct mm_struct *mm) 500{ 501 return; 502} 503 504static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, 505 unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) 506{ 507 return NULL; 508} 509 510static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, 511 unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) 512{ 513 return NULL; 514} 515 516static inline bool thp_migration_supported(void) 517{ 518 return false; 519} 520#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 521 522static inline int split_folio_to_list(struct folio *folio, 523 struct list_head *list) 524{ 525 return split_huge_page_to_list(&folio->page, list); 526} 527 528static inline int split_folio(struct folio *folio) 529{ 530 return split_folio_to_list(folio, NULL); 531} 532 533/* 534 * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to 535 * limitations in the implementation like arm64 MTE can override this to 536 * false 537 */ 538#ifndef arch_thp_swp_supported 539static inline bool arch_thp_swp_supported(void) 540{ 541 return true; 542} 543#endif 544 545#endif /* _LINUX_HUGE_MM_H */