/* include/linux/huge_mm.h, as of Linux v4.13 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                         struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                        unsigned long new_addr, unsigned long old_end,
                        pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                        pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
                        pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t count,
                                enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf,
                                enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_NOHUGEPAGE)
                return false;

        if (is_vma_temporary_stack(vma))
                return false;

        if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;

        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                return true;

        if (vma_is_dax(vma))
                return true;

        if (transparent_hugepage_flags &
                        (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
                return !!(vma->vm_flags & VM_HUGEPAGE);

        return false;
}

#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()                                \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()        0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (pmd_trans_huge(*____pmd)                            \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        } while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud)                            \
                                        || pud_devmap(*____pud))        \
                        __split_huge_pud(__vma, __pud, __address);      \
        } while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                  unsigned long start,
                                  unsigned long end,
                                  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}
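/*
 * Usage sketch (hypothetical, not part of the original header): the
 * pattern the lock helpers above support.  pmd_trans_huge_lock()
 * returns the held page-table lock only when the PMD really is huge
 * (trans-huge or devmap); a walker that cannot process the huge entry
 * whole drops the lock and uses split_huge_pmd() so it can retry at
 * the PTE level.  The function name and return values are illustrative.
 */
static inline int pmd_walk_sketch(struct vm_area_struct *vma, pmd_t *pmd,
                                  unsigned long addr, bool handle_huge)
{
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);    /* asserts mmap_sem held */
        if (ptl) {
                if (handle_huge) {
                        /* Process all HPAGE_PMD_NR pages under the lock. */
                        spin_unlock(ptl);
                        return HPAGE_PMD_NR;
                }
                spin_unlock(ptl);
                split_huge_pmd(vma, pmd, addr); /* no-op once split */
        }
        /* PMD is (now) not mapped huge; caller walks individual PTEs. */
        return 1;
}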
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
        return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area   NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
        BUILD_BUG();
        return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
        return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
        unsigned long addr, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
        unsigned long addr, pud_t *pud, int flags)
{
        return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */
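/*
 * Usage sketch (hypothetical, not part of the original header, and
 * assuming CONFIG_TRANSPARENT_HUGEPAGE=y): how a fault path might
 * combine transparent_hugepage_enabled() with the HPAGE_PMD_*
 * geometry macros before attempting a huge-PMD allocation.  The
 * helper name is made up; the real policy checks live in
 * mm/huge_memory.c.
 */
static inline bool thp_fault_suitable(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long haddr;

        if (!transparent_hugepage_enabled(vma))
                return false;

        /* The PMD-aligned huge page must lie entirely inside the VMA. */
        haddr = vmf->address & HPAGE_PMD_MASK;
        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return false;

        return true;
}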