/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
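
/*
 * Illustrative usage (a sketch, not part of the upstream header): walking
 * every registered huge page size, e.g. to dump per-hstate counters:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */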

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
			       loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
				  loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
				     loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     struct page **pagep);
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
			   struct vm_area_struct *vma,
			   vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
		     unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
					  unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
					   pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,
					   struct page **pagep)
{
	BUG();
	return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
				     unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
				      struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an architecture supports hugepages
 * at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		   struct user_struct **user, int creat_flags,
		   int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to page. i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
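
/*
 * For illustration: with CONFIG_HUGETLB_PAGE, HPAGEFLAG(Freed, freed) above
 * expands to roughly:
 *
 *	static inline int HPageFreed(struct page *page)
 *		{ return test_bit(HPG_freed, &(page->private)); }
 *	static inline void SetHPageFreed(struct page *page)
 *		{ set_bit(HPG_freed, &(page->private)); }
 *	static inline void ClearHPageFreed(struct page *page)
 *		{ clear_bit(HPG_freed, &(page->private)); }
 */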
552
553#ifdef CONFIG_HUGETLB_PAGE
554
555#define HSTATE_NAME_LEN 32
556/* Defines one hugetlb page size */
557struct hstate {
558 int next_nid_to_alloc;
559 int next_nid_to_free;
560 unsigned int order;
561 unsigned long mask;
562 unsigned long max_huge_pages;
563 unsigned long nr_huge_pages;
564 unsigned long free_huge_pages;
565 unsigned long resv_huge_pages;
566 unsigned long surplus_huge_pages;
567 unsigned long nr_overcommit_huge_pages;
568 struct list_head hugepage_activelist;
569 struct list_head hugepage_freelists[MAX_NUMNODES];
570 unsigned int nr_huge_pages_node[MAX_NUMNODES];
571 unsigned int free_huge_pages_node[MAX_NUMNODES];
572 unsigned int surplus_huge_pages_node[MAX_NUMNODES];
573#ifdef CONFIG_CGROUP_HUGETLB
574 /* cgroup control files */
575 struct cftype cgroup_files_dfl[7];
576 struct cftype cgroup_files_legacy[9];
577#endif
578 char name[HSTATE_NAME_LEN];
579};
580
581struct huge_bootmem_page {
582 struct list_head list;
583 struct hstate *hstate;
584};
585
586struct page *alloc_huge_page(struct vm_area_struct *vma,
587 unsigned long addr, int avoid_reserve);
588struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
589 nodemask_t *nmask, gfp_t gfp_mask);
590struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
591 unsigned long address);
592int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
593 pgoff_t idx);
594
595/* arch callback */
596int __init __alloc_bootmem_huge_page(struct hstate *h);
597int __init alloc_bootmem_huge_page(struct hstate *h);
598
599void __init hugetlb_add_hstate(unsigned order);
600bool __init arch_hugetlb_valid_size(unsigned long size);
601struct hstate *size_to_hstate(unsigned long size);
602
603#ifndef HUGE_MAX_HSTATE
604#define HUGE_MAX_HSTATE 1
605#endif
606
607extern struct hstate hstates[HUGE_MAX_HSTATE];
608extern unsigned int default_hstate_idx;
609
610#define default_hstate (hstates[default_hstate_idx])
611
/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (struct hugepage_subpool *)(hpage+1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					    struct hugepage_subpool *subpool)
{
	set_page_private(hpage+1, (unsigned long)subpool);
}
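
/*
 * Illustrative round trip of the two helpers above (an example, not a
 * kernel API contract): the pointer lives in the first tail page's private
 * field, so for a hugetlb head page "hpage":
 *
 *	hugetlb_set_page_subpool(hpage, spool);
 *	WARN_ON(hugetlb_page_subpool(hpage) != spool);
 */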

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
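
/*
 * Example (illustrative): mmap() callers derive page_size_log from the
 * uapi MAP_HUGE_* encoding, i.e. (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK.
 * MAP_HUGE_2MB yields page_size_log == 21, so hstate_sizelog(21) looks up
 * the hstate for 1UL << 21 == 2MB pages, while 0 selects the default hstate.
 */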

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
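
/*
 * The divisor 512 above is the block (sector) size; e.g. a 2MB huge page
 * gives (2 << 20) / 512 == 4096 blocks.
 */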

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
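
/*
 * Worked example (illustrative): a 2MB huge page spans 512 base pages, so
 * a subpage of a compound page whose head sits at huge page index 3 maps
 * to base page index 3 * 512 == 1536 plus the subpage's offset within the
 * compound page.
 */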

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
	    (huge_page_shift(h) == PUD_SHIFT) ||
	    (huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is distinct from the migration check: it determines
 * whether or not a huge page should be placed in a movable zone. Movability
 * only matters if the huge page size is supported for migration at all;
 * there is no reason for a huge page to be movable if it is not migratable
 * to start with. The page size should also be large enough to be placed in
 * a movable zone while still being feasible to migrate; mere presence in a
 * movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable because it is not feasible to
 * migrate them from a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
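
/*
 * Note: PMD sized huge pages reuse the split page table lock of the PMD
 * page, so faults on different PMDs need not serialize on the single
 * mm->page_table_lock; all larger sizes fall back to that per-mm lock.
 */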

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			 nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
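
/*
 * Typical caller pattern (a sketch modeled on mm/hugetlb.c users):
 *
 *	ptep = huge_pte_offset(mm, address, huge_page_size(h));
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */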

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

#endif /* _LINUX_HUGETLB_H */