/* include/linux/huge_mm.h, as of Linux v4.9 */
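/*
 * PMD-level transparent hugepage (THP) interfaces: fault and
 * copy-on-write handlers, split primitives, the huge zero page, and the
 * sysfs flag plumbing.  When CONFIG_TRANSPARENT_HUGEPAGE is off, the
 * second half of this header provides no-op stubs so that generic mm
 * code does not need ifdefs of its own.
 */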
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count,
					  enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf,
					 enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
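
/*
 * Illustrative sketch (not part of the upstream header): the generic
 * fault path gates THP allocation on the per-VMA policy check above,
 * roughly along the lines of mm/memory.c in this release:
 *
 *	if (pmd_none(*fe.pmd) && transparent_hugepage_enabled(vma))
 *		ret = create_huge_pmd(&fe);
 *
 * transparent_hugepage_flags itself is driven from
 * /sys/kernel/mm/transparent_hugepage/ via the single_hugepage_flag_store()
 * helper declared earlier.
 */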

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
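
/*
 * Typical caller pattern for pmd_trans_huge_lock() (a sketch, not part
 * of the upstream header).  The lock is returned only when the pmd is
 * trans-huge or devmap, so callers handle the huge case under the lock
 * and otherwise fall back to pte-level processing, e.g.:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		mincore_huge_pmd(vma, pmd, addr, end, vec);
 *		spin_unlock(ptl);
 *		return 0;
 *	}
 *	... walk the pte level instead ...
 */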

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */
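
/*
 * Usage note (illustrative sketch, not in the upstream file): the
 * !CONFIG_TRANSPARENT_HUGEPAGE stubs above let generic mm code stay
 * ifdef-free.  For example, per-base-page work over a possibly-huge
 * page can be written once:
 *
 *	int i, nr = hpage_nr_pages(page);
 *
 *	for (i = 0; i < nr; i++)
 *		...operate on page + i...
 *
 * With THP disabled, hpage_nr_pages() is the constant 1, so the loop
 * degenerates to the single-page case at compile time.
 */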