/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

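/*
 * Illustrative sketch (not part of this header's API): iterating every
 * registered huge page size with for_each_hstate. The fields used match
 * struct hstate below; the printout itself is hypothetical.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu free of %lu\n",
 *			h->name, h->free_huge_pages, h->nr_huge_pages);
 */
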
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

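/*
 * Illustrative sketch: a subpool is typically created per hugetlbfs mount
 * and dropped when the superblock goes away. The limits below are
 * hypothetical; -1 means "no limit", as documented in struct
 * hugepage_subpool above.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, 64, -1);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */
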
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
				     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

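/*
 * Illustrative sketch: the fault mutex table serialises faults (and
 * userfaultfd copies) that target the same mapping/index. The mapping
 * and idx variables are assumed to come from the caller.
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... operate on the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
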
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **pagep)
{
	BUG();
	return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting
	 * rules apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

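/*
 * Illustrative sketch of an anonymous MAP_HUGETLB-style setup, loosely
 * modelled on how mmap() uses this helper. The surrounding variables
 * (len, user) and the error handling are assumptions of the example; a
 * page_size_log of 0 selects the default huge page size.
 *
 *	struct user_struct *user = NULL;
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */
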
static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		   struct user_struct **user, int creat_flags,
		   int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[5];
	struct cftype cgroup_files_legacy[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
			     unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				      nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				 unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx);

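/*
 * Illustrative sketch: a migration "allocate new page" callback can use the
 * allocators above to obtain a destination huge page of the same size as
 * the source, similar to what the migration core does. The function and
 * variable names here are hypothetical.
 *
 *	static struct page *new_huge_page(struct page *page, unsigned long node)
 *	{
 *		struct hstate *h = page_hstate(compound_head(page));
 *
 *		return alloc_huge_page_nodemask(h, (int)node, NULL);
 *	}
 */
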
/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

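/*
 * Worked example for hstate_sizelog(): a page_size_log of 0 selects the
 * default huge page size, while a non-zero value is the base-2 log of the
 * requested size as encoded in the MAP_HUGE_SHIFT bits of the mmap flags,
 * e.g. 21 for 2 MB or 30 for 1 GB on x86-64. An unsupported size makes
 * size_to_hstate() return NULL, so callers must check the result.
 */
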
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

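/*
 * Worked example for the helpers above, assuming a 4 KiB base page and a
 * 2 MiB huge page (order 9): huge_page_size() is 2097152,
 * huge_page_shift() is 21, pages_per_huge_page() is 512, and
 * blocks_per_huge_page() is 4096 512-byte sectors. With a MAX_ORDER of 11,
 * hstate_is_gigantic() is false for the 2 MiB hstate but true for a 1 GiB
 * hstate (order 18).
 */
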
#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

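/*
 * Worked example: for the head page of a 2 MiB huge page cached at byte
 * offset 4 MiB in a hugetlbfs file, page->index is in huge-page units (2),
 * while basepage_index() converts to PAGE_SIZE units and returns 1024.
 * Callers such as the futex code rely on this to get a stable per-4K index.
 */
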
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
	    (huge_page_shift(h) == PUD_SHIFT) ||
	    (huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone. Movability of any huge page is relevant only
 * if the huge page size is supported for migration: there is no
 * reason for a huge page to be movable if it is not migratable
 * to start with. The huge page also needs to be large enough to
 * be placed in a movable zone and still feasible to migrate;
 * mere presence in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable because it is not
 * feasible to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

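/*
 * Illustrative sketch: callers typically bail out early when the platform
 * did not detect huge page support at boot, e.g. before registering
 * sysfs/sysctl knobs or initialising hugetlbfs.
 *
 *	if (!hugepages_supported())
 *		return -ENOTSUPP;
 */
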
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

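/*
 * Illustrative sketch of the intended protection-change sequence: the start
 * hook clears the entry (so the arch can batch or invalidate), the caller
 * computes the new protections, and the commit hook installs them. The
 * variable names are assumptions of the example.
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */
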
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

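/*
 * Illustrative sketch: huge_pte_lock() pairs with a plain spin_unlock()
 * once the caller is done with the huge PTE. The ptep here is assumed to
 * come from huge_pte_offset() or huge_pte_alloc().
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the entry at ptep ...
 *	spin_unlock(ptl);
 */
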
#endif /* _LINUX_HUGETLB_H */