Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/hugetlb.h at v6.6 (1299 lines, 36 kB)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

void free_huge_folio(struct folio *folio);

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For HugeTLB page, there are more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse other tail
 * struct page to store the metadata.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
	struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map.  These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				      unsigned long address, unsigned int flags,
				      unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
			   struct vm_area_struct *vma,
			   vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
			       bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long address)
{
	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: we should normally not directly call this function, instead
 * this is only a common interface to implement arch-specific
 * walker. Please use hugetlb_walk() instead, because that will attempt to
 * verify the locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * high-level pgtable page, but also PUD entry that can be unshared
 * concurrently for VM_SHARED), the caller of this function should be
 * responsible for its thread safety.  One can follow this rule:
 *
 * (1) For private mappings: pmd unsharing is not possible, so holding the
 *     mmap_lock for either read or write is sufficient. Most callers
 *     already hold the mmap_lock, so normally, no special action is
 *     required.
 *
 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *     pgtable page can go away from under us!  It can be done by a pmd
 *     unshare with a follow up munmap() on the other process), then we
 *     need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest, which guarantees pte stability from pmd
 * sharing pov, until the vma lock released.  Option (2.2) doesn't protect
 * a concurrent pmd unshare, but it makes sure the pgtable page is safe to
 * access.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
				unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
			      struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
				     unsigned long *start, unsigned long *end)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
				   struct zap_details *details)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_end(vma, details);
}

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_begin(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
				struct vm_area_struct *vma,
				struct zap_details *details)
{
}

static inline struct page *hugetlb_follow_page_mask(
	struct vm_area_struct *vma, unsigned long address, unsigned int flags,
	unsigned int *page_mask)
{
	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int
					      len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,
					   uffd_flags_t flags,
					   struct folio **foliop)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
{
	return 0;
}

static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
					struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t	mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *		locking.
 *		When examined and modified during migration processing
 *		(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static __always_inline						\
bool folio_test_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		return test_bit(HPG_##flname, private);		\
	}							\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_set_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		set_bit(HPG_##flname, private);			\
	}							\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_clear_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		clear_bit(HPG_##flname, private);		\
	}							\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline bool						\
folio_test_hugetlb_##flname(struct folio *folio)		\
	{ return 0; }						\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void						\
folio_set_hugetlb_##flname(struct folio *folio)			\
	{ }							\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void						\
folio_clear_hugetlb_##flname(struct folio *folio)		\
	{ }							\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int
	    next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct folio *folio);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return folio->_hugetlb_subpool;
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
{
	folio->_hugetlb_subpool = subpool;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return
	       h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) > MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed on
 * movable zone or not. Movability of any huge page should be
 * required only if huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support.
 */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif

#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

/*
 * Check if a given raw @page in a hugepage is HWPOISON.
 */
bool is_raw_hwpoison_page_in_hugepage(struct page *page);

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
						unsigned long addr,
						int avoid_reserve)
{
	return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
						    struct vm_area_struct *vma,
						    unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void
hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_MMU
	return ptep_get(ptep);
#else
	return *ptep;
#endif
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

static inline bool __vma_private_lock(struct vm_area_struct *vma)
{
	return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
}

/*
 * Safe version of huge_pte_offset() to check the locks.  See comments
 * above huge_pte_offset().
 */
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PAGE) && \
	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	/*
	 * If pmd sharing possible, locking needed to safely walk the
	 * hugetlb pgtables.  More information can be found at the comment
	 * above huge_pte_offset() in the same file.
	 *
	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
	 */
	if (__vma_shareable_lock(vma))
		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
			     !lockdep_is_held(
				 &vma->vm_file->f_mapping->i_mmap_rwsem));
#endif
	return huge_pte_offset(vma->vm_mm, addr, sz);
}

#endif /* _LINUX_HUGETLB_H */
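
The locking comment above huge_pte_offset() and the lockdep check inside hugetlb_walk() describe a concrete calling convention. The following is a minimal illustrative sketch (not part of the header, and not code from the kernel tree) of how a kernel-internal caller might follow rule (2.1); the function name inspect_hugetlb_pte() is hypothetical, and it assumes CONFIG_HUGETLB_PAGE plus an mmap_lock already held for read.

/*
 * Illustrative sketch only: walk a hugetlb VMA following rule (2.1) above.
 * inspect_hugetlb_pte() is a hypothetical caller.
 */
static bool inspect_hugetlb_pte(struct vm_area_struct *vma, unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	spinlock_t *ptl;
	pte_t *ptep;
	bool present = false;

	/* Rule (2.1): hold the hugetlb VMA lock so a pmd unshare cannot race us. */
	hugetlb_vma_lock_read(vma);

	/* hugetlb_walk() verifies (via lockdep) that one of the required locks is held. */
	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), sz);
	if (ptep) {
		/* Take the per-PTE lock (PMD split lock or mm->page_table_lock). */
		ptl = huge_pte_lock(h, vma->vm_mm, ptep);
		present = pte_present(huge_ptep_get(ptep));
		spin_unlock(ptl);
	}

	hugetlb_vma_unlock_read(vma);
	return present;
}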
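
The HPAGEFLAG() macros generate per-flag accessors backed by bits in the head page's page.private, with the synchronization rules spelled out in the flag comment block. As a sketch of what that machinery yields, HPAGEFLAG(Freed, freed) expands to folio_test_hugetlb_freed(), folio_set_hugetlb_freed() and folio_clear_hugetlb_freed(); the caller below is hypothetical and simply respects the documented rule that HPG_freed is only touched under hugetlb_lock.

/*
 * Illustrative sketch only: a hypothetical caller using the accessors
 * generated by HPAGEFLAG(Freed, freed), honoring the hugetlb_lock rule.
 */
static void mark_folio_on_free_list(struct folio *folio)
{
	lockdep_assert_held(&hugetlb_lock);	/* HPG_freed requires hugetlb_lock */

	if (!folio_test_hugetlb_freed(folio))
		folio_set_hugetlb_freed(folio);
}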
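
The hstate geometry helpers are all derived from h->order. As a worked example (assuming a 4 KiB base page size): a 2 MiB hstate has order 9, so huge_page_size() is 4096 << 9 = 2 MiB, huge_page_shift() is 12 + 9 = 21, and pages_per_huge_page() is 1 << 9 = 512. The sketch below is illustrative only; report_hstate_geometry() is a hypothetical helper.

/*
 * Illustrative sketch only: print the geometry encoded by an hstate.
 */
static void report_hstate_geometry(struct hstate *h)
{
	pr_info("hstate %s: size %lu KiB, order %u, %u base pages per huge page\n",
		h->name, huge_page_size(h) >> 10, huge_page_order(h),
		pages_per_huge_page(h));
}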
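
Similarly, htlb_alloc_mask() and htlb_modify_alloc_mask() show how allocation masks are meant to be composed: the base mask is GFP_HIGHUSER_MOVABLE only when the hstate is movable, and callers may carry over __GFP_THISNODE and __GFP_NOWARN. The helper below is a hypothetical sketch, not kernel code, of building a node-bound mask for a migration target.

/*
 * Illustrative sketch only: compose a node-constrained gfp mask for a
 * hugetlb allocation using the helpers above.
 */
static gfp_t hugetlb_node_bound_gfp(struct hstate *h)
{
	/* Keep the allocation on the requested node and stay quiet on failure. */
	return htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NOWARN);
}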