/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map.  These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

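/*
 * Illustrative sketch (editorial addition, not part of this header): given
 * the [from, to) convention above, the number of huge pages covered by a
 * resv_map's region list could be summed roughly as follows, assuming the
 * resv_map lock is held:
 *
 *	struct file_region *rg;
 *	long pages = 0;
 *
 *	list_for_each_entry(rg, &resv_map->regions, link)
 *		pages += rg->to - rg->from;
 */
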
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
				struct vm_area_struct *vma,
				vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
				long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

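/*
 * Illustrative sketch (editorial addition, not part of this header): fault
 * and truncation paths serialize on a per-(mapping, index) mutex picked out
 * of hugetlb_fault_mutex_table, along the lines of:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault or hole-punch work for this huge page index ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
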
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory.  If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)

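/*
 * Illustrative sketch (editorial addition, not part of this header): each
 * HPAGEFLAG() invocation above expands to three helpers named after its
 * first argument.  For HPAGEFLAG(Migratable, migratable) that is
 * HPageMigratable(), SetHPageMigratable() and ClearHPageMigratable(), all
 * operating on bit HPG_migratable in the head page's page->private, e.g.:
 *
 *	if (!HPageMigratable(page))
 *		SetHPageMigratable(page);
 */
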
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (struct hugepage_subpool *)(hpage+1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage+1, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

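/*
 * Illustrative arithmetic (editorial addition, not part of this header),
 * assuming a 4 KiB base PAGE_SIZE: the hstate for 2 MiB huge pages has
 * order == 9, so huge_page_size() returns 4 KiB << 9 = 2 MiB,
 * huge_page_shift() returns 12 + 9 = 21, pages_per_huge_page() returns 512
 * and blocks_per_huge_page() returns 2 MiB / 512 = 4096 sectors.
 */
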
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone.  Movability of any huge page should be
 * required only if the huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with.  Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable.  Just the presence
 * in the movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time.  Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

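/*
 * Illustrative sketch (editorial addition, not part of this header): callers
 * that report or configure huge page state typically bail out early when the
 * platform has no huge page support at all, e.g.:
 *
 *	if (!hugepages_supported())
 *		return;
 */
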
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

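/*
 * Illustrative sketch (editorial addition, not part of this header):
 * huge_pte_lock() pairs with a plain spin_unlock() on the returned lock, e.g.:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... examine or modify the huge PTE at ptep ...
 *	spin_unlock(ptl);
 *
 * For PMD-sized huge pages huge_pte_lockptr() hands back the split PMD lock;
 * larger sizes fall back to mm->page_table_lock.
 */
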
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */