Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/hugetlb.h at v5.18-rc1
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page than
 * the head struct page can hold, so we have to abuse other tail struct pages
 * to store the metadata. In order to avoid conflicts caused by subsequent use
 * of more tail struct pages, we gather these discrete indexes of tail struct
 * pages here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
	__NR_USED_SUBPAGE,
};

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
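/*
 * Illustrative sketch (not from the original header), assuming a
 * struct resv_map *resv whose lock is already held: the [from, to)
 * arithmetic above lets the pages covered by all regions be summed as:
 *
 *	struct file_region *rg;
 *	long total = 0;
 *
 *	list_for_each_entry(rg, &resv->regions, link)
 *		total += rg->to - rg->from;
 */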
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
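/*
 * Illustrative sketch: faults on the same (mapping, index) pair are
 * serialized by hashing into the fault mutex table:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */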

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
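/*
 * Illustrative sketch, assuming a struct hstate *h for the mapping:
 * a typical lookup of the entry backing a huge-page address uses the
 * huge_page_mask()/huge_page_size() helpers defined later in this header:
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr & huge_page_mask(h),
 *				      huge_page_size(h));
 *
 *	if (ptep)
 *		entry = huge_ptep_get(ptep);
 */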
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory. If an arch supports hugepages
 * at the pgd level, it needs to define this.
 */
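/*
 * Illustrative sketch: because pgd_huge()/p4d_huge() fall back to 0 below,
 * generic page walkers can test them unconditionally:
 *
 *	if (pgd_huge(*pgd))
 *		page = follow_huge_pgd(mm, address, pgd, flags);
 */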
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to the page, i.e. after allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
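/*
 * Illustrative sketch: each HPAGEFLAG() line above generates three helpers.
 * For example, HPAGEFLAG(Freed, freed) yields HPageFreed(), SetHPageFreed()
 * and ClearHPageFreed(), all operating on bit HPG_freed of the head page's
 * page->private:
 *
 *	if (HPageFreed(page))
 *		... page is on a free list ...
 */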

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
	unsigned int nr_free_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);
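/*
 * Illustrative sketch of the fault-path allocation pattern
 * (alloc_huge_page() returns an ERR_PTR() on failure):
 *
 *	struct page *page = alloc_huge_page(vma, address, 0);
 *
 *	if (IS_ERR(page))
 *		return vmf_error(PTR_ERR(page));
 */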

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}
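/*
 * Worked example (assumes 4 KB base pages, as on x86-64): a 2 MB huge page
 * has order 9, so huge_page_size() returns 4096 << 9 = 2097152 bytes,
 * huge_page_shift() returns 21 and pages_per_huge_page() returns 512.
 */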

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it
 * determines whether a huge page should be placed in a movable zone.
 * Movability should be required only if the huge page size is supported
 * for migration; there is no reason for a huge page to be movable if it
 * is not migratable to start with. The huge page must also be small
 * enough that migrating it out of a movable zone remains feasible; mere
 * presence in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
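/*
 * Illustrative sketch: huge_pte_lockptr() only locates the lock; callers
 * take and release it themselves (huge_pte_lock() near the end of this
 * header combines the lookup with spin_lock()):
 *
 *	spinlock_t *ptl = huge_pte_lockptr(h, mm, ptep);
 *
 *	spin_lock(ptl);
 *	... access the huge pte ...
 *	spin_unlock(ptl);
 */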

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
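/*
 * Illustrative sketch of the modify_prot protocol, as used when changing
 * protections on a huge pte (huge_pte_modify() comes from the arch
 * hugetlb headers):
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = huge_pte_modify(old_pte, newprot);
 *
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */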

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries
 * can implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */