Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/hugetlb.h at v5.13-rc6 (1071 lines, 28 kB)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};
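
/*
 * Illustrative sketch, not part of this header: max_hpages and min_hpages
 * use -1 as a "no limit" sentinel, so limit checks must treat that value
 * specially. The helper below is hypothetical and only demonstrates the
 * intended semantics of the fields above.
 */
static inline bool example_subpool_has_room(struct hugepage_subpool *spool,
                                            long delta)
{
        bool ret;

        if (!spool)             /* no subpool means no subpool limit */
                return true;

        spin_lock(&spool->lock);
        ret = spool->max_hpages == -1 ||
              spool->used_hpages + delta <= spool->max_hpages;
        spin_unlock(&spool->lock);

        return ret;
}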

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};
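
/*
 * Illustrative sketch, not part of this header: given the [from, to)
 * convention documented above, the number of huge pages covered by a
 * resv_map can be computed by summing (to - from) over its regions.
 * The helper name is hypothetical; a real caller would hold resv->lock.
 */
static inline long example_count_region_pages(struct resv_map *resv)
{
        struct file_region *rg;
        long pages = 0;

        list_for_each_entry(rg, &resv->regions, link)
                pages += rg->to - rg->from;

        return pages;
}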

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                               loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                                  loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                                     loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                enum mcopy_atomic_mode mode,
                                struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                           struct vm_area_struct *vma,
                           vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                             long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
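
/*
 * Illustrative sketch, not part of this header: hugetlb faults are
 * serialized on a hashed mutex so that concurrent faults on the same
 * page of the same mapping do not race. The helper names below are
 * hypothetical (and assume <linux/mutex.h> is in scope); 'idx' is the
 * huge page index within the mapping.
 */
static inline u32 example_fault_lock(struct address_space *mapping, pgoff_t idx)
{
        u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

        mutex_lock(&hugetlb_fault_mutex_table[hash]);
        return hash;
}

static inline void example_fault_unlock(u32 hash)
{
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}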

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                     unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                          unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                                                        struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long *addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *nonblocking)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                        struct mm_struct *src, struct vm_area_struct *vma)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                                unsigned long address, hugepd_t hpd, int flags,
                                int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                                unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                                unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                                unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                                pte_t *dst_pte,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                enum mcopy_atomic_mode mode,
                                                struct page **pagep)
{
        BUG();
        return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                                        struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports hugepages at
 * the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE       "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */
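
/*
 * Illustrative sketch, not part of this header: MAP_HUGETLB mappings are
 * backed by an unlinked file on the internal hugetlbfs mount. A caller
 * along the lines of mm/mmap.c would do (page_size_log == 0 selects the
 * default huge page size):
 *
 *      struct user_struct *user = NULL;
 *      struct file *file;
 *
 *      file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *                                &user, HUGETLB_ANONHUGE_INODE, 0);
 *      if (IS_ERR(file))
 *              return PTR_ERR(file);
 */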

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *      allocation time. Cleared when page is fully instantiated. Free
 *      routine checks flag to restore a reservation on error paths.
 *      Synchronization: Examined or modified by code that knows it has
 *      the only reference to page, i.e. after allocation but before use
 *      or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *      cache and/or page tables. Indicates the page is a candidate for
 *      migration.
 *      Synchronization: Initially set after new page allocation with no
 *      locking. When examined and modified during migration processing
 *      (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *      allocator. Typically used for migration target pages when no pages
 *      are available in the pool. The hugetlb free page path will
 *      immediately free pages with this flag set to the buddy allocator.
 *      Synchronization: Can be set after huge page allocation from buddy when
 *      code knows it has the only reference. All other examinations and
 *      modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *      Synchronization: hugetlb_lock held for examination and modification.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
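
/*
 * For example, HPAGEFLAG(Freed, freed) above expands (when
 * CONFIG_HUGETLB_PAGE is enabled) to:
 *
 *      static inline int HPageFreed(struct page *page)
 *              { return test_bit(HPG_freed, &(page->private)); }
 *      static inline void SetHPageFreed(struct page *page)
 *              { set_bit(HPG_freed, &(page->private)); }
 *      static inline void ClearHPageFreed(struct page *page)
 *              { clear_bit(HPG_freed, &(page->private)); }
 *
 * so callers can test and atomically update the flag word stashed in the
 * head page's page.private.
 */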

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[7];
        struct cftype cgroup_files_legacy[9];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return (struct hugepage_subpool *)(hpage+1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
                                        struct hugepage_subpool *subpool)
{
        set_page_private(hpage+1, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
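
/*
 * Illustrative sketch, not part of this header: with 4 KiB base pages a
 * 2 MiB huge page has order 9, so huge_page_size() returns
 * 4096 << 9 = 2 MiB and pages_per_huge_page() returns 512. The
 * hypothetical helper below (assuming printk is in scope) walks every
 * registered size with for_each_hstate() and prints these derived values.
 */
static inline void example_dump_hstates(void)
{
        struct hstate *h;

        for_each_hstate(h)
                pr_info("%s: order %u, size %lu, %u base pages\n",
                        h->name, huge_page_order(h), huge_page_size(h),
                        pages_per_huge_page(h));
}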

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone. Movability needs to be considered only if the
 * huge page size is supported for migration at all: there is no
 * reason for a huge page to be movable if it is not migratable to
 * start with. The size of the huge page must also be small enough
 * that migrating it out of a movable zone remains feasible; mere
 * presence in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible
 * to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce the node */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide at boot time whether they support huge pages.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is
 * no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
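
/*
 * Illustrative sketch, not part of this header: the two hooks above are
 * used as a start/commit pair when changing protection on a mapped huge
 * page, with the PTE lock held, roughly as mm/hugetlb.c does:
 *
 *      ptl = huge_pte_lock(h, mm, ptep);
 *      old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *      pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *      huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 *      spin_unlock(ptl);
 */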

#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline int isolate_or_dissolve_huge_page(struct page *page,
                                                struct list_head *list)
{
        return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                        nodemask_t *nmask, gfp_t gfp_mask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
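
/*
 * Illustrative sketch, not part of this header: huge_pte_lock() returns
 * the lock it took so the caller can drop it when done, e.g.:
 *
 *      spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *      pte_t entry = huge_ptep_get(ptep);
 *
 *      ... examine or update the entry ...
 *      spin_unlock(ptl);
 */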

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */