/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count,
					  enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf,
					 enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;
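/*
 * Illustrative sketch (not part of the original header): HPAGE_PMD_MASK and
 * HPAGE_PMD_NR are typically used to align a faulting address down to its
 * PMD-sized huge page and to iterate over the small pages that back it.
 * The variable names below are hypothetical:
 *
 *	unsigned long haddr = address & HPAGE_PMD_MASK;
 *	int i;
 *
 *	for (i = 0; i < HPAGE_PMD_NR; i++)
 *		clear_user_highpage(page + i, haddr + i * PAGE_SIZE);
 */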
/*
 * To be used on VMAs which are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);
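/*
 * Illustrative sketch (not part of the original header): a fault path that
 * wants to install a PMD-sized mapping is expected to combine the per-vma
 * policy check with the size/alignment check before calling the huge fault
 * handlers.  The helper below is hypothetical, loosely modelled on the
 * checks done in mm/memory.c and mm/huge_memory.c:
 *
 *	static vm_fault_t try_huge_anon_fault(struct vm_fault *vmf)
 *	{
 *		unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 *
 *		if (!__transparent_hugepage_enabled(vmf->vma))
 *			return VM_FAULT_FALLBACK;
 *		if (!transhuge_vma_suitable(vmf->vma, haddr))
 *			return VM_FAULT_FALLBACK;
 *		return do_huge_pmd_anonymous_page(vmf);
 *	}
 */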
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
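/*
 * Illustrative sketch (not part of the original header): pmd_trans_huge_lock()
 * is the usual entry point for code that must handle a PMD which may be
 * mapped huge.  It returns the page table lock held when the PMD is huge
 * (or a swap/devmap entry) and NULL otherwise, so callers typically look
 * like the hypothetical walker below (compare the walkers in
 * fs/proc/task_mmu.c):
 *
 *	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);
 *
 *	if (ptl) {
 *		... operate on the stable huge pmd ...
 *		spin_unlock(ptl);
 *	} else {
 *		... fall back to walking the individual ptes ...
 *	}
 */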
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
		unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */