/* include/linux/huge_mm.h, as of Linux v3.13 */
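/*
 * Interface to the transparent hugepage (THP) implementation in
 * mm/huge_memory.c: PMD-level fault, copy and write-protect handlers,
 * split/zap/move/mprotect helpers, and the transparent_hugepage_flags
 * tunables exposed under /sys/kernel/mm/transparent_hugepage/.
 */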
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag,
				     spinlock_t **ptl);

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
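
/*
 * Illustrative sketch, not part of the original header: the anonymous
 * fault path gates PMD-sized faults on the predicate above, roughly as
 * __handle_mm_fault() in mm/memory.c does in this kernel:
 *
 *	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
 *		int ret = do_huge_pmd_anonymous_page(mm, vma, address,
 *						     pmd, flags);
 *		if (!(ret & VM_FAULT_FALLBACK))
 *			return ret;
 *	}
 *	(on VM_FAULT_FALLBACK the caller retries with a pte-sized fault)
 */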
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			  pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
extern void __split_huge_page_pmd(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd);
#define split_huge_page_pmd(__vma, __address, __pmd)			\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (unlikely(pmd_trans_huge(*____pmd)))			\
			__split_huge_page_pmd(__vma, __address,		\
					      ____pmd);			\
	} while (0)
#define wait_split_huge_page(__anon_vma, __pmd)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		anon_vma_lock_write(__anon_vma);			\
		anon_vma_unlock_write(__anon_vma);			\
		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
		       pmd_trans_huge(*____pmd));			\
	} while (0)
extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
				   pmd_t *pmd);
#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				 spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				      spinlock_t **ptl)
{
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	if (!vma->anon_vma || vma->vm_ops)
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
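
/*
 * Illustrative sketch, not part of the original header: callers that want
 * to handle a huge pmd in place rather than split it use the pattern
 * below (compare mm/mincore.c in this kernel). pmd_trans_huge_lock()
 * returns 1 if the pmd is huge and stable, with *ptl held; 0 if it is
 * not huge; -1 if a split was in progress and has been waited out, in
 * which case the caller should fall back to the pte level.
 *
 *	spinlock_t *ptl;
 *
 *	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 *		mincore_huge_pmd(vma, pmd, addr, next, vec);
 *		spin_unlock(ptl);
 *		return;
 *	}
 *	(otherwise walk the ptes as usual)
 */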
static inline struct page *compound_trans_head(struct page *page)
{
	if (PageTail(page)) {
		struct page *head;
		head = page->first_page;
		smp_rmb();
		/*
		 * head may be a dangling pointer.
		 * __split_huge_page_refcount clears PageTail before
		 * overwriting first_page, so if PageTail is still
		 * there it means the head pointer isn't dangling.
		 */
		if (PageTail(page))
			return head;
	}
	return page;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__vma, __address, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
	do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				      spinlock_t **ptl)
{
	return 0;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */
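
/*
 * Illustrative sketch, not part of the original header: because the
 * !CONFIG_TRANSPARENT_HUGEPAGE stubs above compile to no-ops, generic
 * page table walkers can invoke the THP hooks unconditionally. The usual
 * pattern (compare zap_pmd_range() in mm/memory.c of this kernel) splits
 * the huge page when the operation covers only part of it, and handles
 * the pmd natively when it covers all of it:
 *
 *	if (pmd_trans_huge(*pmd)) {
 *		if (next - addr != HPAGE_PMD_SIZE)
 *			split_huge_page_pmd(vma, addr, pmd);
 *		else if (zap_huge_pmd(tlb, vma, pmd, addr))
 *			goto next;
 *	}
 *	(otherwise fall through to the pte level)
 */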