/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
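
/*
 * Editor's sketch (illustrative, not from the kernel source): a resv_map
 * whose region list holds [0, 3) and [5, 6) covers
 * (3 - 0) + (6 - 5) = 4 huge pages. Lifetime follows the usual kref idiom,
 * using the resv_map_alloc()/resv_map_release() pair declared below:
 *
 *	struct resv_map *resv = resv_map_alloc();
 *
 *	if (resv) {
 *		... add or consume regions under resv->lock ...
 *		kref_put(&resv->refs, resv_map_release);
 *	}
 */
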
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
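
/*
 * Editor's sketch (assumed usage, not part of this header's contract): paths
 * that must serialize against hugetlb faults on a given file offset hash into
 * the fault mutex table before touching the page at that index:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... look up or instantiate the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
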
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **pagep)
{
	BUG();
	return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
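
/*
 * Editor's sketch of a typical caller (assumed, modeled on anonymous
 * MAP_HUGETLB setup rather than quoting it): pass HUGETLB_ANONHUGE_INODE so
 * shmfs accounting rules are skipped, and page_size_log == 0 to use the
 * default huge page size:
 *
 *	struct user_struct *user = NULL;
 *	struct file *file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
 *					       VM_NORESERVE, &user,
 *					       HUGETLB_ANONHUGE_INODE, 0);
 *
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */
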
static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
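
/*
 * Worked example (editor's note): for the x86-64 2MB hstate, order == 9,
 * so huge_page_size() == 4KiB << 9 == 2MiB, huge_page_shift() == 21,
 * pages_per_huge_page() == 512 and blocks_per_huge_page() == 4096
 * 512-byte sectors. With MAX_ORDER == 11 on x86-64, an order-9 page is
 * not gigantic, while the 1GiB hstate (order 18) is: gigantic hstates
 * are those too large to come from the buddy allocator.
 */
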
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it decides
 * whether or not a huge page should be placed in a movable zone. Movability
 * only matters for huge page sizes that support migration in the first
 * place; there is no reason for a huge page to be movable if it is not
 * migratable to start with. The huge page must also be small enough that
 * migrating it out of a movable zone remains feasible; mere presence in a
 * movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
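
/*
 * Editor's sketch of the intended start/commit pairing (assumed, modeled on
 * the change-protection path; huge_pte_modify() is the transform used there,
 * and newprot stands in for the caller's new protection bits):
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */
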
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

#endif /* _LINUX_HUGETLB_H */