/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, unsigned long old_end,
			  pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count,
					  enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf,
					 enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

/*
 * to be used on vmas which are known to support THP.
 * Use transparent_hugepage_enabled otherwise
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}
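
/*
 * Illustrative sketch only (not part of this header): a fault path that
 * has already validated the vma for THP, such as the anonymous huge fault
 * path, would typically gate the huge allocation like:
 *
 *	if (__transparent_hugepage_enabled(vma))
 *		return do_huge_pmd_anonymous_page(vmf);
 *
 * Callers that have not validated the vma should use
 * transparent_hugepage_enabled() below instead, per the comment above.
 */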
bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
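
/*
 * Illustrative sketch only: the usual caller pattern, assuming mmap_sem
 * is already held as required above. A non-NULL return means the pmd is
 * a huge, devmap or swap pmd and stays stable until the lock is dropped:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */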
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
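
/*
 * Illustrative sketch only: migration code can use this helper to fall
 * back to splitting a THP that the architecture cannot migrate whole,
 * e.g.:
 *
 *	if (PageTransHuge(page) && !thp_migration_supported())
 *		split_huge_page(page);
 */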

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */