#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT HPAGE_SHIFT
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE

#define transparent_hugepage_enabled(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||			\
	  (transparent_hugepage_flags &				\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&		\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&		\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&		\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||		\
	 (transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&	\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			  pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
extern int handle_pte_fault(struct mm_struct *mm,
			    struct vm_area_struct *vma, unsigned long address,
			    pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page(struct page *page);
extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
#define split_huge_page_pmd(__mm, __pmd)			\
	do {							\
		pmd_t *____pmd = (__pmd);			\
		if (unlikely(pmd_trans_huge(*____pmd)))		\
			__split_huge_page_pmd(__mm, ____pmd);	\
	} while (0)
#define wait_split_huge_page(__anon_vma, __pmd)			\
	do {							\
		pmd_t *____pmd = (__pmd);			\
		spin_unlock_wait(&(__anon_vma)->root->lock);	\
		/*						\
		 * spin_unlock_wait() is just a loop in C and so\
		 * the CPU can reorder anything around it.	\
		 */						\
		smp_mb();					\
		BUG_ON(pmd_trans_splitting(*____pmd) ||		\
		       pmd_trans_huge(*____pmd));		\
	} while (0)
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#if HPAGE_PMD_ORDER > MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
static inline struct page *compound_trans_head(struct page *page)
{
	if (PageTail(page)) {
		struct page *head;
		head = page->first_page;
		smp_rmb();
		/*
		 * head may be a dangling pointer.
		 * __split_huge_page_refcount clears PageTail before
		 * overwriting first_page, so if PageTail is still
		 * there it means the head pointer isn't dangling.
		 */
		if (PageTail(page))
			return head;
	}
	return page;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__mm, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */
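Usage note: a minimal sketch of how a PTE-level page-table walker is expected to call split_huge_page_pmd() before touching individual entries, modeled on in-tree callers of this header such as mm/mprotect.c. The function walk_pte_range() and its empty loop body are hypothetical, added here only to illustrate the split-then-walk pattern; it is not part of the header above.

/*
 * Illustrative only.  A walker that can handle only regular PTEs
 * first demotes any huge PMD it encounters; split_huge_page_pmd()
 * is a no-op when the PMD is not huge, and compiles away entirely
 * without CONFIG_TRANSPARENT_HUGEPAGE.  After the split, the PMD
 * points to an ordinary page table, so mapping its PTEs is safe.
 */
static void walk_pte_range(struct mm_struct *mm, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		/* ... operate on one regular pte here ... */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}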