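/*
 * Transparent huge page (THP) support.
 *
 * Declarations shared between mm/huge_memory.c and the rest of the VM:
 * PMD-level huge page faults, splitting of huge pages and huge PMDs,
 * and the huge zero page.
 */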
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			  struct vm_area_struct *new_vma,
			  unsigned long old_addr,
			  unsigned long new_addr, unsigned long old_end,
			  pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);

/*
 * Bits of transparent_hugepage_flags, controlled through sysfs under
 * /sys/kernel/mm/transparent_hugepage/.
 */
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

/*
 * Buddy allocator order of a PMD-mapped huge page, and the number of
 * base pages it spans.
 */
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

/*
 * True if THP may be used for @__vma: enabled globally ("always") or
 * requested via madvise(MADV_HUGEPAGE), and not disabled by
 * MADV_NOHUGEPAGE or by the VMA being a temporary stack during exec.
 */
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
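/*
 * Typical caller pattern (illustrative sketch, not a declaration from
 * this header): callers such as page migration split a huge page
 * before operating on individual subpages; the page must be locked and
 * the caller must hold a reference:
 *
 *	if (unlikely(PageTransHuge(page)))
 *		if (unlikely(split_huge_page(page)))
 *			goto out;
 *
 * split_huge_page() returns 0 on success and -EBUSY if the page could
 * not be split (for example because of extra pins).
 */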
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address);	\
	} while (0)

#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/*
 * mmap_sem must be held on entry.  Returns the page table lock, taken,
 * if the pmd maps a huge page (transparent huge or devmap); returns
 * NULL, with nothing locked, otherwise.
 */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* Stubs so THP call sites compile away when CONFIG_TRANSPARENT_HUGEPAGE=n. */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
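/*
 * Typical pmd_trans_huge_lock() usage (illustrative sketch):
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 *
 * With CONFIG_TRANSPARENT_HUGEPAGE=n the stub above always returns
 * NULL, so such blocks compile away.
 */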

#endif /* _LINUX_HUGE_MM_H */