/* include/linux/hugetlb.h, Linux v3.13 */
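/*
 * This header declares the hstate descriptor (one per supported huge
 * page size), the hugetlbfs superblock info and file-setup helpers,
 * and the no-op stubs used when CONFIG_HUGETLB_PAGE or
 * CONFIG_HUGETLBFS is disabled.
 */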
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages, used_hpages;
};

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

int PageHuge(struct page *page);
int PageHeadHuge(struct page *page_head);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
				     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
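/*
 * The follow_huge_* callbacks let the core get_user_pages() path look
 * up the struct page backing a huge mapping at the given page-table
 * level; an architecture that does not map huge pages at a level
 * returns NULL or an error pointer from the corresponding helper.
 */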
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline int PageHeadHuge(struct page *page_head)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({ BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define is_hugepage_active(x)	false

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
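/*
 * hugetlb_file_setup() creates an unlinked file on the kernel-internal
 * hugetlbfs mount; it backs both SysV shared memory segments created
 * with SHM_HUGETLB and anonymous mmap(MAP_HUGETLB) mappings.
 * is_file_hugepages() below recognises either kind of file.
 */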
static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				   unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;
	/* 1UL, not 1: page_size_log can exceed the width of int */
	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}
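/*
 * Illustrative numbers, assuming x86-64 with 4 KB base pages and a
 * 2 MB huge page: order = 9, so huge_page_size() = 2 MB,
 * huge_page_shift() = 21, pages_per_huge_page() = 512, and
 * blocks_per_huge_page() below = 4096 512-byte sectors.
 */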
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
int pmd_huge_support(void);
/*
 * Currently hugepage migration is enabled only for pmd-based hugepage.
 * This function will be updated when hugepage migration is more widely
 * supported.
 */
static inline int hugepage_migration_support(struct hstate *h)
{
	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define pmd_huge_support()	0
#define hugepage_migration_support(h)	0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#endif /* _LINUX_HUGETLB_H */