at v4.20 364 lines 11 kB view raw
1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _LINUX_HUGE_MM_H 3#define _LINUX_HUGE_MM_H 4 5#include <linux/sched/coredump.h> 6#include <linux/mm_types.h> 7 8#include <linux/fs.h> /* only for vma_is_dax() */ 9 10extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); 11extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, 12 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 13 struct vm_area_struct *vma); 14extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd); 15extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, 16 pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 17 struct vm_area_struct *vma); 18 19#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 20extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); 21#else 22static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) 23{ 24} 25#endif 26 27extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd); 28extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 29 unsigned long addr, 30 pmd_t *pmd, 31 unsigned int flags); 32extern bool madvise_free_huge_pmd(struct mmu_gather *tlb, 33 struct vm_area_struct *vma, 34 pmd_t *pmd, unsigned long addr, unsigned long next); 35extern int zap_huge_pmd(struct mmu_gather *tlb, 36 struct vm_area_struct *vma, 37 pmd_t *pmd, unsigned long addr); 38extern int zap_huge_pud(struct mmu_gather *tlb, 39 struct vm_area_struct *vma, 40 pud_t *pud, unsigned long addr); 41extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 42 unsigned long addr, unsigned long end, 43 unsigned char *vec); 44extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 45 unsigned long new_addr, unsigned long old_end, 46 pmd_t *old_pmd, pmd_t *new_pmd); 47extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 48 unsigned long addr, pgprot_t newprot, 49 int prot_numa); 50vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, 51 pmd_t *pmd, pfn_t pfn, bool write); 52vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, 53 pud_t *pud, pfn_t pfn, bool write); 54enum transparent_hugepage_flag { 55 TRANSPARENT_HUGEPAGE_FLAG, 56 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, 57 TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, 58 TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, 59 TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, 60 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, 61 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, 62 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, 63#ifdef CONFIG_DEBUG_VM 64 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG, 65#endif 66}; 67 68struct kobject; 69struct kobj_attribute; 70 71extern ssize_t single_hugepage_flag_store(struct kobject *kobj, 72 struct kobj_attribute *attr, 73 const char *buf, size_t count, 74 enum transparent_hugepage_flag flag); 75extern ssize_t single_hugepage_flag_show(struct kobject *kobj, 76 struct kobj_attribute *attr, char *buf, 77 enum transparent_hugepage_flag flag); 78extern struct kobj_attribute shmem_enabled_attr; 79 80#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) 81#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) 82 83#ifdef CONFIG_TRANSPARENT_HUGEPAGE 84#define HPAGE_PMD_SHIFT PMD_SHIFT 85#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) 86#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) 87 88#define HPAGE_PUD_SHIFT PUD_SHIFT 89#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) 90#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) 91 92extern bool is_vma_temporary_stack(struct vm_area_struct *vma); 93 94extern unsigned long transparent_hugepage_flags; 95 96static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) 97{ 98 if (vma->vm_flags & VM_NOHUGEPAGE) 99 return false; 100 101 if (is_vma_temporary_stack(vma)) 102 return false; 103 104 if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) 105 return false; 106 107 if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) 108 return true; 109 110 if (vma_is_dax(vma)) 111 return true; 112 113 if (transparent_hugepage_flags & 114 (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) 115 return !!(vma->vm_flags & VM_HUGEPAGE); 116 117 return false; 118} 119 120#define transparent_hugepage_use_zero_page() \ 121 (transparent_hugepage_flags & \ 122 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) 123#ifdef CONFIG_DEBUG_VM 124#define transparent_hugepage_debug_cow() \ 125 (transparent_hugepage_flags & \ 126 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG)) 127#else /* CONFIG_DEBUG_VM */ 128#define transparent_hugepage_debug_cow() 0 129#endif /* CONFIG_DEBUG_VM */ 130 131extern unsigned long thp_get_unmapped_area(struct file *filp, 132 unsigned long addr, unsigned long len, unsigned long pgoff, 133 unsigned long flags); 134 135extern void prep_transhuge_page(struct page *page); 136extern void free_transhuge_page(struct page *page); 137 138bool can_split_huge_page(struct page *page, int *pextra_pins); 139int split_huge_page_to_list(struct page *page, struct list_head *list); 140static inline int split_huge_page(struct page *page) 141{ 142 return split_huge_page_to_list(page, NULL); 143} 144void deferred_split_huge_page(struct page *page); 145 146void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 147 unsigned long address, bool freeze, struct page *page); 148 149#define split_huge_pmd(__vma, __pmd, __address) \ 150 do { \ 151 pmd_t *____pmd = (__pmd); \ 152 if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \ 153 || pmd_devmap(*____pmd)) \ 154 __split_huge_pmd(__vma, __pmd, __address, \ 155 false, NULL); \ 156 } while (0) 157 158 159void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, 160 bool freeze, struct page *page); 161 162void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, 163 unsigned long address); 164 165#define split_huge_pud(__vma, __pud, __address) \ 166 do { \ 167 pud_t *____pud = (__pud); \ 168 if (pud_trans_huge(*____pud) \ 169 || pud_devmap(*____pud)) \ 170 __split_huge_pud(__vma, __pud, __address); \ 171 } while (0) 172 173extern int hugepage_madvise(struct vm_area_struct *vma, 174 unsigned long *vm_flags, int advice); 175extern void vma_adjust_trans_huge(struct vm_area_struct *vma, 176 unsigned long start, 177 unsigned long end, 178 long adjust_next); 179extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, 180 struct vm_area_struct *vma); 181extern spinlock_t *__pud_trans_huge_lock(pud_t *pud, 182 struct vm_area_struct *vma); 183 184static inline int is_swap_pmd(pmd_t pmd) 185{ 186 return !pmd_none(pmd) && !pmd_present(pmd); 187} 188 189/* mmap_sem must be held on entry */ 190static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, 191 struct vm_area_struct *vma) 192{ 193 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); 194 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) 195 return __pmd_trans_huge_lock(pmd, vma); 196 else 197 return NULL; 198} 199static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, 200 struct vm_area_struct *vma) 201{ 202 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); 203 if (pud_trans_huge(*pud) || pud_devmap(*pud)) 204 return __pud_trans_huge_lock(pud, vma); 205 else 206 return NULL; 207} 208static inline int hpage_nr_pages(struct page *page) 209{ 210 if (unlikely(PageTransHuge(page))) 211 return HPAGE_PMD_NR; 212 return 1; 213} 214 215struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, 216 pmd_t *pmd, int flags, struct dev_pagemap **pgmap); 217struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, 218 pud_t *pud, int flags, struct dev_pagemap **pgmap); 219 220extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd); 221 222extern struct page *huge_zero_page; 223 224static inline bool is_huge_zero_page(struct page *page) 225{ 226 return READ_ONCE(huge_zero_page) == page; 227} 228 229static inline bool is_huge_zero_pmd(pmd_t pmd) 230{ 231 return is_huge_zero_page(pmd_page(pmd)); 232} 233 234static inline bool is_huge_zero_pud(pud_t pud) 235{ 236 return false; 237} 238 239struct page *mm_get_huge_zero_page(struct mm_struct *mm); 240void mm_put_huge_zero_page(struct mm_struct *mm); 241 242#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) 243 244static inline bool thp_migration_supported(void) 245{ 246 return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); 247} 248 249#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 250#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) 251#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) 252#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) 253 254#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) 255#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) 256#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) 257 258#define hpage_nr_pages(x) 1 259 260static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) 261{ 262 return false; 263} 264 265static inline void prep_transhuge_page(struct page *page) {} 266 267#define transparent_hugepage_flags 0UL 268 269#define thp_get_unmapped_area NULL 270 271static inline bool 272can_split_huge_page(struct page *page, int *pextra_pins) 273{ 274 BUILD_BUG(); 275 return false; 276} 277static inline int 278split_huge_page_to_list(struct page *page, struct list_head *list) 279{ 280 return 0; 281} 282static inline int split_huge_page(struct page *page) 283{ 284 return 0; 285} 286static inline void deferred_split_huge_page(struct page *page) {} 287#define split_huge_pmd(__vma, __pmd, __address) \ 288 do { } while (0) 289 290static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 291 unsigned long address, bool freeze, struct page *page) {} 292static inline void split_huge_pmd_address(struct vm_area_struct *vma, 293 unsigned long address, bool freeze, struct page *page) {} 294 295#define split_huge_pud(__vma, __pmd, __address) \ 296 do { } while (0) 297 298static inline int hugepage_madvise(struct vm_area_struct *vma, 299 unsigned long *vm_flags, int advice) 300{ 301 BUG(); 302 return 0; 303} 304static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, 305 unsigned long start, 306 unsigned long end, 307 long adjust_next) 308{ 309} 310static inline int is_swap_pmd(pmd_t pmd) 311{ 312 return 0; 313} 314static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, 315 struct vm_area_struct *vma) 316{ 317 return NULL; 318} 319static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, 320 struct vm_area_struct *vma) 321{ 322 return NULL; 323} 324 325static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, 326 pmd_t orig_pmd) 327{ 328 return 0; 329} 330 331static inline bool is_huge_zero_page(struct page *page) 332{ 333 return false; 334} 335 336static inline bool is_huge_zero_pud(pud_t pud) 337{ 338 return false; 339} 340 341static inline void mm_put_huge_zero_page(struct mm_struct *mm) 342{ 343 return; 344} 345 346static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, 347 unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) 348{ 349 return NULL; 350} 351 352static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, 353 unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) 354{ 355 return NULL; 356} 357 358static inline bool thp_migration_supported(void) 359{ 360 return false; 361} 362#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 363 364#endif /* _LINUX_HUGE_MM_H */