/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(VMEMMAP_START - SZ_256M)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
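
/*
 * Illustrative sketch (editorial addition, not upstream text): vmemmap is
 * biased by the PFN of the base of RAM so that &vmemmap[pfn] is valid for
 * any RAM pfn, e.g.:
 *
 *	unsigned long base_pfn = memstart_addr >> PAGE_SHIFT;
 *	struct page *p = vmemmap + base_pfn; // == (struct page *)VMEMMAP_START
 *
 * i.e. the first struct page in the vmemmap region describes the first
 * page of physical RAM.
 */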

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
#define __phys_to_pte_val(phys)	(phys)
#endif
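
/*
 * Worked example (editorial addition, assuming 64K pages with 52-bit PAs):
 * PA bits [51:48] are kept in PTE bits [15:12], so a physical address with
 * only bit 48 set round-trips as:
 *
 *	phys			= 0x0001000000000000
 *	__phys_to_pte_val(phys)	= 0x0000000000001000	(bit 48 -> bit 12)
 *	__pte_to_phys(pte)	= 0x0001000000000000	(bit 12 << 36 -> bit 48)
 */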

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
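
/*
 * Illustrative note (editorial addition): the "- 1" in the comparison makes
 * the clamp safe even when the next boundary wraps to 0 at the top of the
 * address space. E.g. with 4K pages (CONT_PTE_SIZE == 64K):
 *
 *	pte_cont_addr_end(0x10000, 0x30000) == 0x20000	(next 64K boundary)
 *	pte_cont_addr_end(0x10000, 0x18000) == 0x18000	(clamped to end)
 */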

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
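
/*
 * Illustrative truth table (editorial addition) for pte_valid_not_user():
 *
 *	kernel mapping:		VALID=1 USER=0 UXN=1	-> true
 *	normal user mapping:	VALID=1 USER=1 UXN=x	-> false
 *	user exec-only mapping:	VALID=1 USER=0 UXN=0	-> false
 */
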
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER bit set,
 * subject to the write permission check). Execute-only mappings, as created
 * by PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits clear), must return
 * false. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *     0      0      |     1          0          0
 *     0      1      |     1          1          0
 *     1      0      |     1          0          1
 *     1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
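
/*
 * Worked example (editorial addition, assuming hardware DBM): a clean
 * writable pte has PTE_RDONLY=1, PTE_WRITE=1. On the first CPU write the
 * hardware clears PTE_RDONLY, making the pte "hardware dirty". If the pte
 * is then write-protected, e.g. via ptep_set_wrprotect():
 *
 *	before:	PTE_RDONLY=0  PTE_WRITE=1  PTE_DIRTY=0	(hw dirty)
 *	after:	PTE_RDONLY=1  PTE_WRITE=0  PTE_DIRTY=1	(sw dirty preserved)
 *
 * so pte_dirty() remains true across the transition.
 */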

static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = READ_ONCE(*ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted() returns false for exec only mappings, they
	 * don't expose tags (instruction fetches don't check tags).
	 */
	if (system_supports_mte() && pte_access_permitted(pte, false) &&
	    !pte_special(pte)) {
		pte_t old_pte = READ_ONCE(*ptep);
		/*
		 * We only need to synchronise if the new PTE has tags enabled
		 * or if swapping in (in which case another mapping may have
		 * set tags in the past even if this PTE isn't tagged).
		 * (!pte_none() && !pte_present()) is an open coded version of
		 * is_swap_pte()
		 */
		if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
			mte_sync_tags(old_pte, pte);
	}

	__check_racy_pte_update(mm, ptep, pte);

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
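
/*
 * For example (editorial addition, assuming a 4K granule): PMD_SHIFT == 21,
 * so HPAGE_SIZE == 2M and HUGETLB_PAGE_ORDER == 21 - 12 == 9, i.e. a
 * default huge page covers 512 base pages.
 */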

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present_invalid(pmd)	(!!(pmd_val(pmd) & PMD_PRESENT_INVALID))

static inline int pmd_present(pmd_t pmd)
{
	return pte_present(pmd_pte(pmd)) || pmd_present_invalid(pmd);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(PMD_PRESENT_INVALID));
	pmd = clear_pmd_bit(pmd, __pgprot(PMD_SECT_VALID));

	return pmd;
}

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
#define set_pud_at(mm, addr, pudp, pud)	set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		pmd_sect(pmd)
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
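
/*
 * For example (editorial addition, assuming a 4K granule): a contiguous-bit
 * PMD mapping spans CONT_PMDS (16) adjacent 2M blocks, so pmd_leaf_size()
 * is 32M rather than PMD_SIZE (2M); likewise pte_leaf_size() is 64K rather
 * than 4K for a contiguous PTE.
 */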

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[PTRS_PER_PGD];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#define pud_leaf(pud)		pud_sect(pud)
#define pud_valid(pud)		pte_valid(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
#ifdef __PAGETABLE_PUD_FOLDED
	if (in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}
#endif /* __PAGETABLE_PUD_FOLDED */

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(!p4d_val(p4d))
#define p4d_bad(p4d)		(!(p4d_val(p4d) & 2))
#define p4d_present(p4d)	(p4d_val(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr)	pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP |
			      PTE_ATTRINDX_MASK;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
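
/*
 * Usage sketch (editorial addition): pte_modify() changes only the
 * protection bits in "mask" and leaves the output address and remaining
 * software bits alone, e.g. when protections change on a mapping:
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 *
 * A dirty, writable pte remapped with a read-only vm_page_prot keeps its
 * pfn and, via the pte_hw_dirty() check above, its dirty state.
 */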

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
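
/*
 * Worked example (editorial addition): __swp_entry(3, 0x10) yields
 * (3 << 2) | (0x10 << 8) == 0x100c, from which __swp_type() recovers 3 and
 * __swp_offset() recovers 0x10. Bits [1:0] stay zero, so the entry can
 * never be mistaken for a valid pte.
 */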

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
{
	if (system_supports_mte() && mte_restore_tags(entry, page))
		set_bit(PG_mte_tagged, &page->flags);
}

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif
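
/*
 * Worked example (editorial addition): with 52-bit PAs, the table base
 * address bits [51:48] live in TTBR_ELx bits [5:2], so a base address with
 * only bit 48 set becomes:
 *
 *	addr		= 0x0001000000000000
 *	addr >> 46	= 0x0000000000000004	(bit 48 -> bit 2)
 */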

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with a
 * zeroed page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
static inline bool arch_faults_on_old_pte(void)
{
	WARN_ON(preemptible());

	return !cpu_has_hw_af();
}
#define arch_faults_on_old_pte		arch_faults_on_old_pte

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
static inline bool arch_wants_old_prefaulted_pte(void)
{
	return !arch_faults_on_old_pte();
}
#define arch_wants_old_prefaulted_pte	arch_wants_old_prefaulted_pte

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */