/* linux/huge_mm.h, as of Linux v4.7 */
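/*
 * Declarations for transparent huge page (THP) PMD support: fault, copy,
 * split and zap helpers, the transparent_hugepage_flags bits, and the no-op
 * stubs used when CONFIG_TRANSPARENT_HUGEPAGE is disabled.
 */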
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
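/*
 * split_huge_pmd() only calls __split_huge_pmd() when the PMD actually maps
 * a transparent huge page or a huge devmap entry; otherwise it is a no-op.
 */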
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
void put_huge_zero_page(void);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline void put_huge_zero_page(void)
{
	BUILD_BUG();
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */