/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
/*
 * A HugeTLB page carries more metadata than the head struct page can
 * hold, so some of the tail struct pages are reused to store it.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};
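
/*
 * Illustrative sketch, not part of the kernel API: with the half-open
 * [from, to) convention documented above, the number of huge pages a
 * file_region covers is a plain difference. This hypothetical helper
 * exists only to make the interval arithmetic concrete.
 */
static inline long example_file_region_pages(const struct file_region *rg)
{
        /* to is the first index past the end, so no +1 is needed. */
        return rg->to - rg->from;
}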

struct hugetlb_vma_lock {
        struct kref refs;
        struct rw_semaphore rw_sema;
        struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                               loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                                  loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                                     loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
                             struct vm_area_struct *new_vma,
                             unsigned long old_addr, unsigned long new_addr,
                             unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
                            struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
                                      unsigned long address, unsigned int flags);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *,
                          zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                                  struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end,
                                  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             enum mcopy_atomic_mode mode,
                             struct page **pagep,
                             bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                           struct vm_area_struct *vma,
                           vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                             long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                               bool *migratable_cleared);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

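/*
 * Illustrative sketch, not part of the kernel API: hugetlb fault paths
 * serialize on a mutex chosen by hashing (mapping, index) into
 * hugetlb_fault_mutex_table. A hypothetical caller would bracket its
 * fault work with the hashed mutex like this.
 */
static inline void example_fault_mutex_usage(struct address_space *mapping,
                                             pgoff_t idx)
{
        u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

        mutex_lock(&hugetlb_fault_mutex_table[hash]);
        /* ... fault handling that must not race on this (mapping, idx) ... */
        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
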
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                     unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                          unsigned long *start, unsigned long *end);

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot,
                unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                                                        struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
                                unsigned long address, unsigned int flags)
{
        BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *nonblocking)
{
        BUG();
        return 0;
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                                          struct mm_struct *src,
                                          struct vm_area_struct *dst_vma,
                                          struct vm_area_struct *src_vma)
{
        BUG();
        return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
                                           struct vm_area_struct *new_vma,
                                           unsigned long old_addr,
                                           unsigned long new_addr,
                                           unsigned long len)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
        return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                           pte_t *dst_pte,
                                           struct vm_area_struct *dst_vma,
                                           unsigned long dst_addr,
                                           unsigned long src_addr,
                                           enum mcopy_atomic_mode mode,
                                           struct page **pagep,
                                           bool wp_copy)
{
        BUG();
        return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                     unsigned long sz)
{
        return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
        return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison)
{
        return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                             bool *migratable_cleared)
{
        return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
                                      struct folio *new_folio, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot,
                        unsigned long cp_flags)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page,
                        zap_flags_t zap_flags)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports hugepages at
 * the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file, so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long max_inodes;        /* inodes allowed */
        long free_inodes;       /* inodes free */
        spinlock_t stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                   int creat_flags, int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                  unsigned long len, unsigned long pgoff,
                                  unsigned long flags);

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *      allocation time. Cleared when page is fully instantiated. Free
 *      routine checks flag to restore a reservation on error paths.
 *      Synchronization: Examined or modified by code that knows it has
 *      the only reference to page. i.e. After allocation but before use
 *      or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *      cache and/or page tables. Indicates the page is a candidate for
 *      migration.
 *      Synchronization: Initially set after new page allocation with no
 *      locking. When examined and modified during migration processing
 *      (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *      allocator. Typically used for migration target pages when no pages
 *      are available in the pool. The hugetlb free page path will
 *      immediately free pages with this flag set to the buddy allocator.
 *      Synchronization: Can be set after huge page allocation from buddy when
 *      code knows it holds the only reference. All other examinations and
 *      modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *      Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *      that is not tracked by the raw_hwp_page list.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        HPG_vmemmap_optimized,
        HPG_raw_hwp_unreliable,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static __always_inline                                          \
bool folio_test_hugetlb_##flname(struct folio *folio)           \
        {       void *private = &folio->private;                \
                return test_bit(HPG_##flname, private);         \
        }                                                       \
static inline int HPage##uname(struct page *page)               \
        { return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)                             \
static __always_inline                                          \
void folio_set_hugetlb_##flname(struct folio *folio)            \
        {       void *private = &folio->private;                \
                set_bit(HPG_##flname, private);                 \
        }                                                       \
static inline void SetHPage##uname(struct page *page)           \
        { set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)                           \
static __always_inline                                          \
void folio_clear_hugetlb_##flname(struct folio *folio)          \
        {       void *private = &folio->private;                \
                clear_bit(HPG_##flname, private);               \
        }                                                       \
static inline void ClearHPage##uname(struct page *page)         \
        { clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline bool                                              \
folio_test_hugetlb_##flname(struct folio *folio)                \
        { return 0; }                                           \
static inline int HPage##uname(struct page *page)               \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void                                              \
folio_set_hugetlb_##flname(struct folio *folio)                 \
        { }                                                     \
static inline void SetHPage##uname(struct page *page)           \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void                                              \
folio_clear_hugetlb_##flname(struct folio *folio)               \
        { }                                                     \
static inline void ClearHPage##uname(struct page *page)         \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)

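/*
 * Illustrative sketch, not part of the kernel API: each HPAGEFLAG()
 * invocation above generates test/set/clear accessors, e.g.
 * folio_test_hugetlb_migratable(), folio_set_hugetlb_migratable() and
 * folio_clear_hugetlb_migratable(). A hypothetical caller marking a
 * freshly mapped folio as a migration candidate might look like this.
 */
static inline void example_mark_migratable(struct folio *folio)
{
        if (!folio_test_hugetlb_migratable(folio))
                folio_set_hugetlb_migratable(folio);
}
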
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned int demote_order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int max_huge_pages_node[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[8];
        struct cftype cgroup_files_legacy[10];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
                             unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                      nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                 unsigned long address);
int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
                              pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                              unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

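/*
 * Illustrative sketch, not part of the kernel API: with hstates[] and
 * hugetlb_max_hstate declared, the for_each_hstate() iterator defined
 * earlier can walk every registered huge page size. A hypothetical
 * walker summing the configured pool sizes could look like this.
 */
static inline unsigned long example_total_pool_pages(void)
{
        struct hstate *h;
        unsigned long total = 0;

        for_each_hstate(h)
                total += h->max_huge_pages;
        return total;
}
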
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
        return folio->_hugetlb_subpool;
}

/*
 * hugetlb page subpool pointer located in hpage[2].hugetlb_subpool
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return hugetlb_folio_subpool(page_folio(hpage));
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
                                             struct hugepage_subpool *subpool)
{
        folio->_hugetlb_subpool = subpool;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
                                            struct hugepage_subpool *subpool)
{
        hugetlb_set_folio_subpool(page_folio(hpage), subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        if (page_size_log < BITS_PER_LONG)
                return size_to_hstate(1UL << page_size_log);

        return NULL;
}

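/*
 * Illustrative sketch, not part of the kernel API: page_size_log is a
 * log2 page size (as encoded by the MAP_HUGE_* mmap flags), so looking
 * up the 2MB pool means passing 21. Returns NULL if no such pool exists.
 */
static inline struct hstate *example_2mb_hstate(void)
{
        return hstate_sizelog(21);      /* 1UL << 21 == 2MB */
}
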
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}

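/*
 * Illustrative sketch, not part of the kernel API: the geometry helpers
 * above all derive from hstate->order. For a 2MB huge page with 4KB base
 * pages, order == 9: huge_page_size() == 2MB, pages_per_huge_page() == 512,
 * and blocks_per_huge_page() == 4096 512-byte sectors. This hypothetical
 * check just restates that relationship.
 */
static inline bool example_geometry_consistent(const struct hstate *h)
{
        return huge_page_size(h) ==
               (unsigned long)pages_per_huge_page(h) << PAGE_SHIFT;
}
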
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
                                       vm_flags_t flags)
{
        return pte_mkhuge(entry);
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
        return size_to_hstate(folio_size(folio));
}

static inline struct hstate *page_hstate(struct page *page)
{
        return folio_hstate(page_folio(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void hugetlb_clear_page_hwpoison(struct page *hpage);
#else
static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
            (huge_page_shift(h) == PUD_SHIFT) ||
            (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * Movability is a different check from migratability: it decides whether
 * a huge page should be placed in a movable zone. That only matters for
 * huge page sizes that support migration in the first place; a page that
 * is not migratable has no reason to be movable. The page size must also
 * be small enough that migrating it out of a movable zone stays feasible,
 * since mere presence in a movable zone does not by itself make migration
 * feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce node */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}

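/*
 * Illustrative sketch, not part of the kernel API: an allocation for this
 * hstate starts from htlb_alloc_mask() and lets the caller's gfp flags
 * narrow it via htlb_modify_alloc_mask(); only __GFP_THISNODE and
 * __GFP_NOWARN survive the filter. This hypothetical wrapper shows the
 * intended call pattern.
 */
static inline gfp_t example_hugetlb_gfp(struct hstate *h, bool this_node_only)
{
        gfp_t gfp = this_node_only ? __GFP_THISNODE : 0;

        return htlb_modify_alloc_mask(h, gfp);
}
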
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
        atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
        return NULL;
}

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
                                                struct list_head *list)
{
        return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                         nodemask_t *nmask, gfp_t gfp_mask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h, int nid)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
{
        return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}

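/*
 * Illustrative sketch, not part of the kernel API: huge_pte_lock()
 * returns the lock it took, so the caller unlocks the same spinlock
 * whether a split PMD lock or mm->page_table_lock was chosen. A
 * hypothetical locked walk follows that pattern.
 */
static inline void example_locked_pte_access(struct hstate *h,
                                             struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl = huge_pte_lock(h, mm, pte);

        /* ... examine or modify the huge PTE under ptl ... */
        spin_unlock(ptl);
}
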
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
        return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
        return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * Architectures with special requirements for evicting hugetlb-backing
 * TLB entries can implement this; the default falls back to
 * flush_tlb_range().
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */