/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
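/*
 * A minimal usage sketch (illustrative only, not part of this header's
 * contract): drivers usually reach this through io_remap_pfn_range() or
 * similar, but the macro can be applied to a pgprot_t directly. The vma
 * below is hypothetical.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */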

#ifndef __ASSEMBLY__
#include <linux/spinlock.h>
#include <asm/x86_init.h>
#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm/coco.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

struct seq_file;
void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
bool ptdump_walk_pgd_level_checkwx(void);
#define ptdump_check_wx ptdump_walk_pgd_level_checkwx
void ptdump_walk_user_pgd_level_checkwx(void);

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(cc_mkenc(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(cc_mkdec(pgprot_val(prot)))

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx_user()	do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)
#endif	/* CONFIG_PARAVIRT_XXL */

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}
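/*
 * A short sketch of how the *_set_flags()/*_clear_flags() helpers compose
 * (the values are illustrative only):
 *
 *	pmd_t pmd = __pmd(0);
 *
 *	pmd = pmd_set_flags(pmd, _PAGE_RW | _PAGE_ACCESSED);
 *	pmd = pmd_clear_flags(pmd, _PAGE_RW);	// now read-only
 *
 * The higher-level pte_mk*()/pmd_mk*()/pud_mk*() helpers below are thin
 * wrappers around exactly this pattern.
 */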

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline bool pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY_BITS;
}

static inline bool pte_shstk(pte_t pte)
{
	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
	       (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline bool pte_decrypted(pte_t pte)
{
	return cc_mkdec(pte_val(pte)) == pte_val(pte);
}

#define pmd_dirty pmd_dirty
static inline bool pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY_BITS;
}

static inline bool pmd_shstk(pmd_t pmd)
{
	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
	       (pmd_flags(pmd) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
	       (_PAGE_DIRTY | _PAGE_PSE);
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline bool pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY_BITS;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline bool pud_shstk(pud_t pud)
{
	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
	       (pud_flags(pud) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
	       (_PAGE_DIRTY | _PAGE_PSE);
}

static inline int pte_write(pte_t pte)
{
	/*
	 * Shadow stack pages are logically writable, but do not have
	 * _PAGE_RW. Check for them separately from _PAGE_RW itself.
	 */
	return (pte_flags(pte) & _PAGE_RW) || pte_shstk(pte);
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	/*
	 * Shadow stack pages are logically writable, but do not have
	 * _PAGE_RW. Check for them separately from _PAGE_RW itself.
	 */
	return (pmd_flags(pmd) & _PAGE_RW) || pmd_shstk(pmd);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);

#define PFN_PTE_SHIFT	PAGE_SHIFT

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define p4d_leaf p4d_leaf
static inline bool p4d_leaf(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf pmd_leaf
static inline bool pmd_leaf(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when predicating a huge page, consider also pmd_devmap, or use pmd_leaf */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SPECIAL;
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SPECIAL);
}
#endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SPECIAL;
}

static inline pud_t pud_mkspecial(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SPECIAL);
}
#endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

/*
 * Write protection operations can result in Dirty=1,Write=0 PTEs. But in the
 * case of X86_FEATURE_USER_SHSTK, these PTEs denote shadow stack memory. So
 * when creating dirty, write-protected memory, a software bit is used:
 * _PAGE_BIT_SAVED_DIRTY. The following functions take a PTE and transition the
 * Dirty bit to SavedDirty, and vice-versa.
 *
 * This shifting is only done if needed. In the case of shifting
 * Dirty->SavedDirty, the condition is if the PTE is Write=0. In the case of
 * shifting SavedDirty->Dirty, the condition is Write=1.
 */
static inline pgprotval_t mksaveddirty_shift(pgprotval_t v)
{
	pgprotval_t cond = (~v >> _PAGE_BIT_RW) & 1;

	v |= ((v >> _PAGE_BIT_DIRTY) & cond) << _PAGE_BIT_SAVED_DIRTY;
	v &= ~(cond << _PAGE_BIT_DIRTY);

	return v;
}

static inline pgprotval_t clear_saveddirty_shift(pgprotval_t v)
{
	pgprotval_t cond = (v >> _PAGE_BIT_RW) & 1;

	v |= ((v >> _PAGE_BIT_SAVED_DIRTY) & cond) << _PAGE_BIT_DIRTY;
	v &= ~(cond << _PAGE_BIT_SAVED_DIRTY);

	return v;
}
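/*
 * Worked example of the branchless transition above (using the x86 bit
 * positions _PAGE_BIT_RW == 1 and _PAGE_BIT_DIRTY == 6; SavedDirty is a
 * software bit): for a Write=0,Dirty=1 value, cond = (~v >> 1) & 1 == 1,
 * so Dirty is copied into SavedDirty and then cleared, and
 * mksaveddirty_shift() yields Write=0,Dirty=0,SavedDirty=1, which no
 * longer reads as a shadow stack encoding. For a Write=1 value, cond == 0
 * and both statements are no-ops, so nothing moves.
 */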

static inline pte_t pte_mksaveddirty(pte_t pte)
{
	pteval_t v = native_pte_val(pte);

	v = mksaveddirty_shift(v);
	return native_make_pte(v);
}

static inline pte_t pte_clear_saveddirty(pte_t pte)
{
	pteval_t v = native_pte_val(pte);

	v = clear_saveddirty_shift(v);
	return native_make_pte(v);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = pte_clear_flags(pte, _PAGE_RW);

	/*
	 * Blindly clearing _PAGE_RW might accidentally create
	 * a shadow stack PTE (Write=0,Dirty=1). Move the hardware
	 * dirty value to the software bit, if present.
	 */
	return pte_mksaveddirty(pte);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(pte_set_flags(pte, _PAGE_UFFD_WP));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY_BITS);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);

	return pte_mksaveddirty(pte);
}

static inline pte_t pte_mkwrite_shstk(pte_t pte)
{
	pte = pte_clear_flags(pte, _PAGE_RW);

	return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

struct vm_area_struct;
pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma);
#define pte_mkwrite pte_mkwrite

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
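/*
 * These helpers are pure value transformations, so they chain naturally.
 * A hedged sketch (the pfn is hypothetical):
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));	// set Accessed + Dirty
 *
 * Note that pte_mkdirty() routes through pte_mksaveddirty(), so a
 * write-protected PTE can never come out as Write=0,Dirty=1 shadow stack.
 */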

/* See comments above mksaveddirty_shift() */
static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
{
	pmdval_t v = native_pmd_val(pmd);

	v = mksaveddirty_shift(v);
	return native_make_pmd(v);
}

/* See comments above mksaveddirty_shift() */
static inline pmd_t pmd_clear_saveddirty(pmd_t pmd)
{
	pmdval_t v = native_pmd_val(pmd);

	v = clear_saveddirty_shift(v);
	return native_make_pmd(v);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd = pmd_clear_flags(pmd, _PAGE_RW);

	/*
	 * Blindly clearing _PAGE_RW might accidentally create
	 * a shadow stack PMD (RW=0, Dirty=1). Move the hardware
	 * dirty value to the software bit.
	 */
	return pmd_mksaveddirty(pmd);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_wrprotect(pmd_set_flags(pmd, _PAGE_UFFD_WP));
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY_BITS);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd = pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);

	return pmd_mksaveddirty(pmd);
}

static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
{
	pmd = pmd_clear_flags(pmd, _PAGE_RW);

	return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
#define pmd_mkwrite pmd_mkwrite

/* See comments above mksaveddirty_shift() */
static inline pud_t pud_mksaveddirty(pud_t pud)
{
	pudval_t v = native_pud_val(pud);

	v = mksaveddirty_shift(v);
	return native_make_pud(v);
}

/* See comments above mksaveddirty_shift() */
static inline pud_t pud_clear_saveddirty(pud_t pud)
{
	pudval_t v = native_pud_val(pud);

	v = clear_saveddirty_shift(v);
	return native_make_pud(v);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY_BITS);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud = pud_clear_flags(pud, _PAGE_RW);

	/*
	 * Blindly clearing _PAGE_RW might accidentally create
	 * a shadow stack PUD (RW=0, Dirty=1). Move the hardware
	 * dirty value to the software bit.
	 */
	return pud_mksaveddirty(pud);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud = pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);

	return pud_mksaveddirty(pud);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud = pud_set_flags(pud, _PAGE_RW);

	return pud_clear_saveddirty(pud);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
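/*
 * For example (a sketch, assuming a CPU without NX support so that
 * __supported_pte_mask has _PAGE_NX clear): a present pgprot carrying
 * _PAGE_NX would have that bit silently masked off here, while a
 * non-present pgprot (e.g. a swap entry encoding) passes through intact,
 * because its bits do not mean what they mean in a present entry.
 */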

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}
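/*
 * A brief sketch of what pfn_pte() produces (values hypothetical): for
 * page_nr == 0x1234 and PAGE_KERNEL, the physical address field becomes
 * 0x1234000 and the protection bits are OR'd in after check_pgprot() has
 * masked out anything __supported_pte_mask disallows. The XOR with
 * protnone_mask() inverts the PFN bits of PROT_NONE entries as part of
 * the L1TF mitigation; for ordinary present entries it is a no-op.
 */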

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline pud_t pud_mkinvalid(pud_t pud)
{
	return pfn_pud(pud_pfn(pud),
		       __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;
	pte_t pte_result;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);

	pte_result = __pte(val);

	/*
	 * To avoid creating Write=0,Dirty=1 PTEs, pte_modify() needs to avoid:
	 * 1. Marking Write=0 PTEs Dirty=1
	 * 2. Marking Dirty=1 PTEs Write=0
	 *
	 * The first case cannot happen because the _PAGE_CHG_MASK will filter
	 * out any Dirty bit passed in newprot. Handle the second case by
	 * going through the mksaveddirty exercise. Only do this if the old
	 * value was Write=1 to avoid doing this on Shadow Stack PTEs.
	 */
	if (oldval & _PAGE_RW)
		pte_result = pte_mksaveddirty(pte_result);
	else
		pte_result = pte_clear_saveddirty(pte_result);

	return pte_result;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;
	pmd_t pmd_result;

	val &= (_HPAGE_CHG_MASK & ~_PAGE_DIRTY);
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);

	pmd_result = __pmd(val);

	/*
	 * Avoid creating shadow stack PMD by accident. See comment in
	 * pte_modify().
	 */
	if (oldval & _PAGE_RW)
		pmd_result = pmd_mksaveddirty(pmd_result);
	else
		pmd_result = pmd_clear_saveddirty(pmd_result);

	return pmd_result;
}

static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	pudval_t val = pud_val(pud), oldval = val;
	pud_t pud_result;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PUD_PAGE_MASK);

	pud_result = __pud(val);

	/*
	 * Avoid creating shadow stack PUD by accident. See comment in
	 * pte_modify().
	 */
	if (oldval & _PAGE_RW)
		pud_result = pud_mksaveddirty(pud_result);
	else
		pud_result = pud_clear_saveddirty(pud_result);

	return pud_result;
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}
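/*
 * Illustrative use (hypothetical caller, mirroring what mprotect does
 * via vm_get_page_prot()): the PFN, PAT index and encryption bits of the
 * old protection survive, and only bits outside _PAGE_CHG_MASK, such as
 * _PAGE_RW or _PAGE_NX, are taken from the new protection:
 *
 *	pgprot_t prot = pgprot_modify(vma->vm_page_prot, PAGE_READONLY);
 */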

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}
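/*
 * For instance (a hedged example; the address is hypothetical and assumed
 * to be PAT-tracked): a write-through request cannot be satisfied by a
 * write-back mapping, so the caller must keep the stricter type:
 *
 *	is_new_memtype_allowed(0xfd000000, PAGE_SIZE,
 *			       _PAGE_CACHE_MODE_WT,
 *			       _PAGE_CACHE_MODE_WB);	// returns 0
 */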

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user and returns the resulting PGD that must be set in
 * the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */

#endif	/* __ASSEMBLY__ */


#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	if (__pte_needs_invert(pte_val(pte)))
		return __pte(pte_val(pte) - (nr << PFN_PTE_SHIFT));
	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
}
#define pte_advance_pfn	pte_advance_pfn

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)						\
({									\
	pgprot_t __pgprot = pgprot;					\
									\
	WARN_ON_ONCE((pgprot_val(__pgprot) & (_PAGE_DIRTY | _PAGE_RW)) == \
		     _PAGE_DIRTY);					\
	pfn_pte(page_to_pfn(page), __pgprot);				\
})
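/*
 * Typical use (a sketch; 'page' is a hypothetical struct page the caller
 * already holds a reference to):
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *
 * The WARN_ON_ONCE() above fires if a caller passes a Write=0,Dirty=1
 * protection, since that combination would read as shadow stack.
 */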

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
	       (_KERNPG_TABLE & ~_PAGE_ACCESSED);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
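/*
 * With the usual 4 KiB pages (PAGE_SHIFT == 12) this is npg >> 8, i.e.
 * 256 pages per megabyte: pages_to_mb(512) == 2.
 */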

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
	return pud_val(pud) & _PAGE_PSE;
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}
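/*
 * Worked example, assuming 5-level paging (P4D_SHIFT == 39,
 * PTRS_PER_P4D == 512): for address 0x0000010000000000 the P4D index is
 * (address >> 39) & 511 == 2. With 4-level paging the P4D level is
 * folded (PTRS_PER_P4D == 1) and this index is always 0.
 */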

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here. PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	page_table_check_pte_clear(mm, pte);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
		page_table_check_pte_clear(mm, pte);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	/*
	 * Avoid accidentally creating shadow stack PTEs
	 * (Write=0,Dirty=1). Use cmpxchg() to prevent races with
	 * the hardware setting Dirty=1.
	 */
	pte_t old_pte, new_pte;

	old_pte = READ_ONCE(*ptep);
	do {
		new_pte = pte_wrprotect(old_pte);
	} while (!try_cmpxchg((long *)&ptep->pte, (long *)&old_pte, *(long *)&new_pte));
}

#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	pud_t pud = native_pudp_get_and_clear(pudp);

	page_table_check_pud_clear(mm, pud);

	return pud;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	/*
	 * Avoid accidentally creating shadow stack PTEs
	 * (Write=0,Dirty=1). Use cmpxchg() to prevent races with
	 * the hardware setting Dirty=1.
	 */
	pmd_t old_pmd, new_pmd;

	old_pmd = READ_ONCE(*pmdp);
	do {
		new_pmd = pmd_wrprotect(old_pmd);
	} while (!try_cmpxchg((long *)pmdp, (long *)&old_pmd, *(long *)&new_pmd));
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline pud_t pudp_establish(struct vm_area_struct *vma,
		unsigned long address, pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(vma->vm_mm, pudp, pud);
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pudp, pud);
	} else {
		pud_t old = *pudp;
		WRITE_ONCE(*pudp, pud);
		return old;
	}
}
#endif

#define __HAVE_ARCH_PMDP_INVALIDATE_AD
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);

pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pud_t *pudp);

/*
 * Page table pages are page-aligned. The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}
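/*
 * Worked example (assuming 64-bit with 4 KiB pages and 8-byte pgd_t,
 * where PGD_KERNEL_START == (PAGE_SIZE / 2) / sizeof(pgd_t) == 256):
 * a pointer at page offset 0x7f8 is slot 255, the last userspace entry,
 * so this returns true; offset 0x800 is slot 256, the first kernel
 * entry, so it returns false.
 */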

#define pgd_leaf	pgd_leaf
static inline bool pgd_leaf(pgd_t pgd) { return false; }

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
 * All top-level MITIGATION_PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and
 * the user one is in the last 4k. To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}
static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
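/*
 * Worked example (addresses hypothetical): with PAGE_SHIFT == 12,
 * PTI_PGTABLE_SWITCH_BIT selects bit 12. For a kernel PGD at
 * 0xffff888001000000, kernel_to_user_pgdp() yields 0xffff888001001000,
 * the user half of the order-1 allocation, and user_to_kernel_pgdp()
 * flips it back. Because the pair is 8k-aligned, bit 12 of the kernel
 * copy's address is always 0.
 */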
#endif /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
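/*
 * Worked example with 4 KiB pages (PAGE_SHIFT == 12, PTE_SHIFT == 9):
 * page_level_shift() is 12 for PG_LEVEL_4K, 21 for PG_LEVEL_2M and 30
 * for PG_LEVEL_1G, giving page_level_size() of 4 KiB, 2 MiB and 1 GiB
 * respectively; page_level_mask(PG_LEVEL_2M) == ~0x1fffff.
 */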

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
}

static inline int pmd_swp_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD. We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	/*
	 * Write=0,Dirty=1 PTEs are shadow stack, which the kernel
	 * shouldn't generally allow access to, but since they
	 * are already Write=0, the below logic covers both cases.
	 */
	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#define arch_check_zapped_pte arch_check_zapped_pte
void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte);

#define arch_check_zapped_pmd arch_check_zapped_pmd
void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd);

#define arch_check_zapped_pud arch_check_zapped_pud
void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud);

#ifdef CONFIG_XEN_PV
#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
	return !cpu_feature_enabled(X86_FEATURE_XENPV);
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER);
}
#endif

#ifdef CONFIG_X86_SGX
int arch_memory_failure(unsigned long pfn, int flags);
#define arch_memory_failure arch_memory_failure

bool arch_is_platform_page(u64 paddr);
#define arch_is_platform_page arch_is_platform_page
#endif

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_pte_safe(ptep, pte) \
({ \
	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
	set_pte(ptep, pte); \
})

#define set_pmd_safe(pmdp, pmd) \
({ \
	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
	set_pmd(pmdp, pmd); \
})

#define set_pud_safe(pudp, pud) \
({ \
	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
	set_pud(pudp, pud); \
})

#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */