/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * A HugeTLB page needs more metadata than fits in its head struct page,
 * so the tail struct pages are reused to store it. To avoid conflicts
 * with later uses of more tail struct pages, the discrete tail struct
 * page indexes used for metadata are gathered here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
#ifdef CONFIG_MEMORY_FAILURE
	SUBPAGE_INDEX_HWPOISON,
#endif
	__NR_USED_SUBPAGE,
};
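
/*
 * Illustrative sketch: each index above names the tail struct page whose
 * ->private field holds one word of metadata for the huge page. The
 * subpool accessor further down, for example, reads tail page 1:
 *
 *	spool = (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 */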

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by the resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping: from is the starting index of the
 * region, and to is the first index past its end.
 *
 * For example, a file region with from == 0 and to == 4 represents four
 * huge pages in a mapping, since 4(to) - 0(from) = 4.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
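
/*
 * Illustrative sketch (hypothetical values): half-open [from, to)
 * intervals keep the arithmetic free of +1/-1 fixups. A region with
 * from == 2 and to == 5 covers huge page indices 2, 3 and 4:
 *
 *	struct file_region rg = { .from = 2, .to = 5 };
 *	long npages = rg.to - rg.from;	-> 3 huge pages
 *
 * and two regions [a, b) and [c, d) overlap iff a < d && c < b.
 */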

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
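
/*
 * Illustrative sketch: for_each_hstate() visits every registered huge
 * page size, e.g. to dump the per-pool counters (the fields are from
 * struct hstate below):
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free of %lu\n", h->name,
 *			h->free_huge_pages, h->nr_huge_pages);
 */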

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep,
				bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
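
/*
 * Illustrative sketch: faults on the same (mapping, index) pair are
 * serialized through the fault mutex table; callers hash first and then
 * take the matching mutex, as mm/hugetlb.c does internally:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault or reservation work for (mapping, idx) ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */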

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
				 int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
				unsigned long address, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep,
						bool wp_copy)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports hugepages at
 * the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to page. i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by the raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
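
/*
 * Illustrative sketch: each HPAGEFLAG() line above expands into a
 * test/set/clear trio operating on the head page's ->private bits, e.g.
 * HPAGEFLAG(Migratable, migratable) yields:
 *
 *	if (HPageMigratable(page))	-> test_bit(HPG_migratable, ...)
 *		...
 *	SetHPageMigratable(page);	-> set_bit(HPG_migratable, ...)
 *	ClearHPageMigratable(page);	-> clear_bit(HPG_migratable, ...)
 */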

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

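/*
 * Illustrative sketch (hypothetical values): page_size_log is the base-2
 * logarithm of the requested page size, as in the MAP_HUGE_SHIFT
 * encoding. On a machine with 2 MB huge pages:
 *
 *	hstate_sizelog(0);	-> &default_hstate
 *	hstate_sizelog(21);	-> the 2 MB hstate (1UL << 21 == 2 MB)
 *	hstate_sizelog(12);	-> NULL (no 4 KB hstate is registered)
 */
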
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

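/*
 * Illustrative sketch (assuming x86_64 with PAGE_SHIFT == 12): for the
 * 2 MB hstate, order == 9, so:
 *
 *	huge_page_size(h)	-> 4096 << 9	== 2 MB
 *	huge_page_shift(h)	-> 9 + 12	== 21
 *	pages_per_huge_page(h)	-> 1 << 9	== 512 base pages
 *	blocks_per_huge_page(h)	-> 2 MB / 512	== 4096 sectors
 *	hstate_is_gigantic(h)	-> false (9 < MAX_ORDER)
 *
 * A 1 GB hstate has order 18, which is >= MAX_ORDER, hence gigantic.
 */
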
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void hugetlb_clear_page_hwpoison(struct page *hpage);
#else
static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
		(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check differs from the migration check: it determines
 * whether a huge page should be placed in a movable zone. Movability
 * matters only if the huge page size is supported for migration in the
 * first place; there is no reason for a huge page to be movable if it is
 * not migratable. The page must also be small enough that migrating it
 * out of a movable zone stays feasible; mere presence in a movable zone
 * does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

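/*
 * Illustrative sketch: only __GFP_THISNODE and __GFP_NOWARN survive from
 * the caller's mask; everything else comes from the hstate. For a
 * movable hstate:
 *
 *	htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NORETRY);
 *		-> GFP_HIGHUSER_MOVABLE | __GFP_THISNODE
 *		   (__GFP_NORETRY is dropped)
 */
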
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

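/*
 * Illustrative sketch: the start/commit pair brackets a protection
 * change as a transaction, roughly how hugetlb_change_protection()
 * uses it (simplified):
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = huge_pte_modify(old_pte, newprot);
 *
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * The default start clears the PTE, so a racing hardware walker never
 * sees a half-updated entry; architectures may override both hooks.
 */
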
#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

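/*
 * Illustrative sketch: callers hold the returned lock while touching the
 * huge PTE and drop it with spin_unlock(), e.g.:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	pte_t entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 *
 * For PMD-sized pages this is the split PMD lock; otherwise it is
 * mm->page_table_lock (see huge_pte_lockptr() above).
 */
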
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */