/*
 * Source: Linux kernel mirror (for testing),
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * Tags: kernel, os, linux.  Snapshot at tag v4.12 (560 lines, 16 kB).
 */
1#ifndef _LINUX_HUGETLB_H 2#define _LINUX_HUGETLB_H 3 4#include <linux/mm_types.h> 5#include <linux/mmdebug.h> 6#include <linux/fs.h> 7#include <linux/hugetlb_inline.h> 8#include <linux/cgroup.h> 9#include <linux/list.h> 10#include <linux/kref.h> 11#include <asm/pgtable.h> 12 13struct ctl_table; 14struct user_struct; 15struct mmu_gather; 16 17#ifdef CONFIG_HUGETLB_PAGE 18 19#include <linux/mempolicy.h> 20#include <linux/shm.h> 21#include <asm/tlbflush.h> 22 23struct hugepage_subpool { 24 spinlock_t lock; 25 long count; 26 long max_hpages; /* Maximum huge pages or -1 if no maximum. */ 27 long used_hpages; /* Used count against maximum, includes */ 28 /* both alloced and reserved pages. */ 29 struct hstate *hstate; 30 long min_hpages; /* Minimum huge pages or -1 if no minimum. */ 31 long rsv_hpages; /* Pages reserved against global pool to */ 32 /* sasitfy minimum size. */ 33}; 34 35struct resv_map { 36 struct kref refs; 37 spinlock_t lock; 38 struct list_head regions; 39 long adds_in_progress; 40 struct list_head region_cache; 41 long region_cache_count; 42}; 43extern struct resv_map *resv_map_alloc(void); 44void resv_map_release(struct kref *ref); 45 46extern spinlock_t hugetlb_lock; 47extern int hugetlb_max_hstate __read_mostly; 48#define for_each_hstate(h) \ 49 for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++) 50 51struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, 52 long min_hpages); 53void hugepage_put_subpool(struct hugepage_subpool *spool); 54 55void reset_vma_resv_huge_pages(struct vm_area_struct *vma); 56int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 57int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 58int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 59 60#ifdef CONFIG_NUMA 61int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, 62 void __user *, size_t *, loff_t *); 63#endif 64 
65int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); 66long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, 67 struct page **, struct vm_area_struct **, 68 unsigned long *, unsigned long *, long, unsigned int, 69 int *); 70void unmap_hugepage_range(struct vm_area_struct *, 71 unsigned long, unsigned long, struct page *); 72void __unmap_hugepage_range_final(struct mmu_gather *tlb, 73 struct vm_area_struct *vma, 74 unsigned long start, unsigned long end, 75 struct page *ref_page); 76void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 77 unsigned long start, unsigned long end, 78 struct page *ref_page); 79void hugetlb_report_meminfo(struct seq_file *); 80int hugetlb_report_node_meminfo(int, char *); 81void hugetlb_show_meminfo(void); 82unsigned long hugetlb_total_pages(void); 83int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 84 unsigned long address, unsigned int flags); 85int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, 86 struct vm_area_struct *dst_vma, 87 unsigned long dst_addr, 88 unsigned long src_addr, 89 struct page **pagep); 90int hugetlb_reserve_pages(struct inode *inode, long from, long to, 91 struct vm_area_struct *vma, 92 vm_flags_t vm_flags); 93long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 94 long freed); 95int dequeue_hwpoisoned_huge_page(struct page *page); 96bool isolate_huge_page(struct page *page, struct list_head *list); 97void putback_active_hugepage(struct page *page); 98void free_huge_page(struct page *page); 99void hugetlb_fix_reserve_counts(struct inode *inode); 100extern struct mutex *hugetlb_fault_mutex_table; 101u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, 102 struct vm_area_struct *vma, 103 struct address_space *mapping, 104 pgoff_t idx, unsigned long address); 105 106pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); 107 108extern int 
hugepages_treat_as_movable; 109extern int sysctl_hugetlb_shm_group; 110extern struct list_head huge_boot_pages; 111 112/* arch callbacks */ 113 114pte_t *huge_pte_alloc(struct mm_struct *mm, 115 unsigned long addr, unsigned long sz); 116pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); 117int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); 118struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, 119 int write); 120struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, 121 pmd_t *pmd, int flags); 122struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, 123 pud_t *pud, int flags); 124int pmd_huge(pmd_t pmd); 125int pud_huge(pud_t pud); 126unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 127 unsigned long address, unsigned long end, pgprot_t newprot); 128 129#else /* !CONFIG_HUGETLB_PAGE */ 130 131static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) 132{ 133} 134 135static inline unsigned long hugetlb_total_pages(void) 136{ 137 return 0; 138} 139 140#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; }) 141#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) 142#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) 143static inline void hugetlb_report_meminfo(struct seq_file *m) 144{ 145} 146#define hugetlb_report_node_meminfo(n, buf) 0 147static inline void hugetlb_show_meminfo(void) 148{ 149} 150#define follow_huge_pmd(mm, addr, pmd, flags) NULL 151#define follow_huge_pud(mm, addr, pud, flags) NULL 152#define prepare_hugepage_range(file, addr, len) (-EINVAL) 153#define pmd_huge(x) 0 154#define pud_huge(x) 0 155#define is_hugepage_only_range(mm, addr, len) 0 156#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) 157#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) 158#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ 159 src_addr, pagep) ({ BUG(); 0; 
}) 160#define huge_pte_offset(mm, address) 0 161static inline int dequeue_hwpoisoned_huge_page(struct page *page) 162{ 163 return 0; 164} 165 166static inline bool isolate_huge_page(struct page *page, struct list_head *list) 167{ 168 return false; 169} 170#define putback_active_hugepage(p) do {} while (0) 171 172static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 173 unsigned long address, unsigned long end, pgprot_t newprot) 174{ 175 return 0; 176} 177 178static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb, 179 struct vm_area_struct *vma, unsigned long start, 180 unsigned long end, struct page *ref_page) 181{ 182 BUG(); 183} 184 185static inline void __unmap_hugepage_range(struct mmu_gather *tlb, 186 struct vm_area_struct *vma, unsigned long start, 187 unsigned long end, struct page *ref_page) 188{ 189 BUG(); 190} 191 192#endif /* !CONFIG_HUGETLB_PAGE */ 193/* 194 * hugepages at page global directory. If arch support 195 * hugepages at pgd level, they need to define this. 196 */ 197#ifndef pgd_huge 198#define pgd_huge(x) 0 199#endif 200#ifndef p4d_huge 201#define p4d_huge(x) 0 202#endif 203 204#ifndef pgd_write 205static inline int pgd_write(pgd_t pgd) 206{ 207 BUG(); 208 return 0; 209} 210#endif 211 212#ifndef pud_write 213static inline int pud_write(pud_t pud) 214{ 215 BUG(); 216 return 0; 217} 218#endif 219 220#ifndef is_hugepd 221/* 222 * Some architectures requires a hugepage directory format that is 223 * required to support multiple hugepage sizes. For example 224 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" 225 * introduced the same on powerpc. This allows for a more flexible hugepage 226 * pagetable layout. 
227 */ 228typedef struct { unsigned long pd; } hugepd_t; 229#define is_hugepd(hugepd) (0) 230#define __hugepd(x) ((hugepd_t) { (x) }) 231static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, 232 unsigned pdshift, unsigned long end, 233 int write, struct page **pages, int *nr) 234{ 235 return 0; 236} 237#else 238extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, 239 unsigned pdshift, unsigned long end, 240 int write, struct page **pages, int *nr); 241#endif 242 243#define HUGETLB_ANON_FILE "anon_hugepage" 244 245enum { 246 /* 247 * The file will be used as an shm file so shmfs accounting rules 248 * apply 249 */ 250 HUGETLB_SHMFS_INODE = 1, 251 /* 252 * The file is being created on the internal vfs mount and shmfs 253 * accounting rules do not apply 254 */ 255 HUGETLB_ANONHUGE_INODE = 2, 256}; 257 258#ifdef CONFIG_HUGETLBFS 259struct hugetlbfs_sb_info { 260 long max_inodes; /* inodes allowed */ 261 long free_inodes; /* inodes free */ 262 spinlock_t stat_lock; 263 struct hstate *hstate; 264 struct hugepage_subpool *spool; 265}; 266 267static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) 268{ 269 return sb->s_fs_info; 270} 271 272extern const struct file_operations hugetlbfs_file_operations; 273extern const struct vm_operations_struct hugetlb_vm_ops; 274struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, 275 struct user_struct **user, int creat_flags, 276 int page_size_log); 277 278static inline bool is_file_hugepages(struct file *file) 279{ 280 if (file->f_op == &hugetlbfs_file_operations) 281 return true; 282 283 return is_file_shm_hugepages(file); 284} 285 286 287#else /* !CONFIG_HUGETLBFS */ 288 289#define is_file_hugepages(file) false 290static inline struct file * 291hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, 292 struct user_struct **user, int creat_flags, 293 int page_size_log) 294{ 295 return ERR_PTR(-ENOSYS); 296} 297 298#endif /* !CONFIG_HUGETLBFS */ 299 
300#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA 301unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 302 unsigned long len, unsigned long pgoff, 303 unsigned long flags); 304#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ 305 306#ifdef CONFIG_HUGETLB_PAGE 307 308#define HSTATE_NAME_LEN 32 309/* Defines one hugetlb page size */ 310struct hstate { 311 int next_nid_to_alloc; 312 int next_nid_to_free; 313 unsigned int order; 314 unsigned long mask; 315 unsigned long max_huge_pages; 316 unsigned long nr_huge_pages; 317 unsigned long free_huge_pages; 318 unsigned long resv_huge_pages; 319 unsigned long surplus_huge_pages; 320 unsigned long nr_overcommit_huge_pages; 321 struct list_head hugepage_activelist; 322 struct list_head hugepage_freelists[MAX_NUMNODES]; 323 unsigned int nr_huge_pages_node[MAX_NUMNODES]; 324 unsigned int free_huge_pages_node[MAX_NUMNODES]; 325 unsigned int surplus_huge_pages_node[MAX_NUMNODES]; 326#ifdef CONFIG_CGROUP_HUGETLB 327 /* cgroup control files */ 328 struct cftype cgroup_files[5]; 329#endif 330 char name[HSTATE_NAME_LEN]; 331}; 332 333struct huge_bootmem_page { 334 struct list_head list; 335 struct hstate *hstate; 336#ifdef CONFIG_HIGHMEM 337 phys_addr_t phys; 338#endif 339}; 340 341struct page *alloc_huge_page(struct vm_area_struct *vma, 342 unsigned long addr, int avoid_reserve); 343struct page *alloc_huge_page_node(struct hstate *h, int nid); 344struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, 345 unsigned long addr, int avoid_reserve); 346int huge_add_to_page_cache(struct page *page, struct address_space *mapping, 347 pgoff_t idx); 348 349/* arch callback */ 350int __init alloc_bootmem_huge_page(struct hstate *h); 351 352void __init hugetlb_bad_size(void); 353void __init hugetlb_add_hstate(unsigned order); 354struct hstate *size_to_hstate(unsigned long size); 355 356#ifndef HUGE_MAX_HSTATE 357#define HUGE_MAX_HSTATE 1 358#endif 359 360extern struct hstate hstates[HUGE_MAX_HSTATE]; 361extern unsigned 
int default_hstate_idx; 362 363#define default_hstate (hstates[default_hstate_idx]) 364 365static inline struct hstate *hstate_inode(struct inode *i) 366{ 367 return HUGETLBFS_SB(i->i_sb)->hstate; 368} 369 370static inline struct hstate *hstate_file(struct file *f) 371{ 372 return hstate_inode(file_inode(f)); 373} 374 375static inline struct hstate *hstate_sizelog(int page_size_log) 376{ 377 if (!page_size_log) 378 return &default_hstate; 379 380 return size_to_hstate(1UL << page_size_log); 381} 382 383static inline struct hstate *hstate_vma(struct vm_area_struct *vma) 384{ 385 return hstate_file(vma->vm_file); 386} 387 388static inline unsigned long huge_page_size(struct hstate *h) 389{ 390 return (unsigned long)PAGE_SIZE << h->order; 391} 392 393extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); 394 395extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); 396 397static inline unsigned long huge_page_mask(struct hstate *h) 398{ 399 return h->mask; 400} 401 402static inline unsigned int huge_page_order(struct hstate *h) 403{ 404 return h->order; 405} 406 407static inline unsigned huge_page_shift(struct hstate *h) 408{ 409 return h->order + PAGE_SHIFT; 410} 411 412static inline bool hstate_is_gigantic(struct hstate *h) 413{ 414 return huge_page_order(h) >= MAX_ORDER; 415} 416 417static inline unsigned int pages_per_huge_page(struct hstate *h) 418{ 419 return 1 << h->order; 420} 421 422static inline unsigned int blocks_per_huge_page(struct hstate *h) 423{ 424 return huge_page_size(h) / 512; 425} 426 427#include <asm/hugetlb.h> 428 429#ifndef arch_make_huge_pte 430static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, 431 struct page *page, int writable) 432{ 433 return entry; 434} 435#endif 436 437static inline struct hstate *page_hstate(struct page *page) 438{ 439 VM_BUG_ON_PAGE(!PageHuge(page), page); 440 return size_to_hstate(PAGE_SIZE << compound_order(page)); 441} 442 443static inline unsigned 
hstate_index_to_shift(unsigned index) 444{ 445 return hstates[index].order + PAGE_SHIFT; 446} 447 448static inline int hstate_index(struct hstate *h) 449{ 450 return h - hstates; 451} 452 453pgoff_t __basepage_index(struct page *page); 454 455/* Return page->index in PAGE_SIZE units */ 456static inline pgoff_t basepage_index(struct page *page) 457{ 458 if (!PageCompound(page)) 459 return page->index; 460 461 return __basepage_index(page); 462} 463 464extern int dissolve_free_huge_pages(unsigned long start_pfn, 465 unsigned long end_pfn); 466static inline bool hugepage_migration_supported(struct hstate *h) 467{ 468#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION 469 return huge_page_shift(h) == PMD_SHIFT; 470#else 471 return false; 472#endif 473} 474 475static inline spinlock_t *huge_pte_lockptr(struct hstate *h, 476 struct mm_struct *mm, pte_t *pte) 477{ 478 if (huge_page_size(h) == PMD_SIZE) 479 return pmd_lockptr(mm, (pmd_t *) pte); 480 VM_BUG_ON(huge_page_size(h) == PAGE_SIZE); 481 return &mm->page_table_lock; 482} 483 484#ifndef hugepages_supported 485/* 486 * Some platform decide whether they support huge pages at boot 487 * time. 
Some of them, such as powerpc, set HPAGE_SHIFT to 0 488 * when there is no such support 489 */ 490#define hugepages_supported() (HPAGE_SHIFT != 0) 491#endif 492 493void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); 494 495static inline void hugetlb_count_add(long l, struct mm_struct *mm) 496{ 497 atomic_long_add(l, &mm->hugetlb_usage); 498} 499 500static inline void hugetlb_count_sub(long l, struct mm_struct *mm) 501{ 502 atomic_long_sub(l, &mm->hugetlb_usage); 503} 504#else /* CONFIG_HUGETLB_PAGE */ 505struct hstate {}; 506#define alloc_huge_page(v, a, r) NULL 507#define alloc_huge_page_node(h, nid) NULL 508#define alloc_huge_page_noerr(v, a, r) NULL 509#define alloc_bootmem_huge_page(h) NULL 510#define hstate_file(f) NULL 511#define hstate_sizelog(s) NULL 512#define hstate_vma(v) NULL 513#define hstate_inode(i) NULL 514#define page_hstate(page) NULL 515#define huge_page_size(h) PAGE_SIZE 516#define huge_page_mask(h) PAGE_MASK 517#define vma_kernel_pagesize(v) PAGE_SIZE 518#define vma_mmu_pagesize(v) PAGE_SIZE 519#define huge_page_order(h) 0 520#define huge_page_shift(h) PAGE_SHIFT 521static inline unsigned int pages_per_huge_page(struct hstate *h) 522{ 523 return 1; 524} 525#define hstate_index_to_shift(index) 0 526#define hstate_index(h) 0 527 528static inline pgoff_t basepage_index(struct page *page) 529{ 530 return page->index; 531} 532#define dissolve_free_huge_pages(s, e) 0 533#define hugepage_migration_supported(h) false 534 535static inline spinlock_t *huge_pte_lockptr(struct hstate *h, 536 struct mm_struct *mm, pte_t *pte) 537{ 538 return &mm->page_table_lock; 539} 540 541static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) 542{ 543} 544 545static inline void hugetlb_count_sub(long l, struct mm_struct *mm) 546{ 547} 548#endif /* CONFIG_HUGETLB_PAGE */ 549 550static inline spinlock_t *huge_pte_lock(struct hstate *h, 551 struct mm_struct *mm, pte_t *pte) 552{ 553 spinlock_t *ptl; 554 555 ptl = 
huge_pte_lockptr(h, mm, pte); 556 spin_lock(ptl); 557 return ptl; 558} 559 560#endif /* _LINUX_HUGETLB_H */