/* include/linux/huge_mm.h, at v4.18 */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count,
					  enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf,
					 enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;
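/*
 * Editorial note, not part of the original header: transparent_hugepage_flags
 * is the global bitmask behind /sys/kernel/mm/transparent_hugepage/enabled
 * and .../defrag; writing "always" sets TRANSPARENT_HUGEPAGE_FLAG, "madvise"
 * sets TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG.  As an illustrative sketch of the
 * size macros above: with 4 KiB base pages on x86-64, PMD_SHIFT == 21, so
 * HPAGE_PMD_SIZE is 2 MiB, HPAGE_PMD_ORDER is 9 and HPAGE_PMD_NR is 512;
 * "addr & HPAGE_PMD_MASK" rounds an address down to the start of its
 * PMD-sized huge page.
 */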

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
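/*
 * Editorial sketch, not from the original header: a typical caller pattern
 * for pmd_trans_huge_lock(), with mmap_sem already held:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... handle the huge/swap/devmap pmd, *pmd is stable here ...
 *		spin_unlock(ptl);
 *	} else {
 *		... not (or no longer) huge: fall back to the pte-mapped path ...
 *	}
 *
 * On success the pmd lock is returned already taken and the caller must drop
 * it with spin_unlock(); on failure NULL is returned and no lock is held.
 */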
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot)  pmd_mkhuge(mk_pmd(page, prot))

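/*
 * Editorial note, not part of the original header: huge_zero_page is the
 * single system-wide PMD-sized page of zeroes used for anonymous read faults
 * when transparent_hugepage_use_zero_page() is enabled.
 * mm_get_huge_zero_page() takes a per-mm reference (allocating the page on
 * first use) and mm_put_huge_zero_page() drops it when the mm goes away;
 * is_huge_zero_pmd() lets the fault and split paths recognise a pmd that maps
 * it.  mk_huge_pmd() builds the huge pmd entry for a compound page, e.g.
 * "entry = mk_huge_pmd(page, vma->vm_page_prot);" in the anonymous fault path.
 */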
static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */