/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
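/*
 * Worked example (editorial, not part of the original source): on a
 * 64-bit machine ADDRESS_SPACE_END is 0xffffffffffffffff, so the kernel
 * links at 0xffffffffffffffff - 0x80000000 + 1 = 0xffffffff80000000,
 * i.e. the last 2GB of the virtual address space.
 */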
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE	((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
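/*
 * Illustrative numbers (editorial, assuming Sv39 with 4KB pages):
 * PTRS_PER_PGD = 512 and PGDIR_SIZE = 1GB, so the kernel half of the
 * address space is 256GB and KERN_VIRT_SIZE, the direct-mapping window,
 * is half of that: 128GB.
 */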

#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END		PAGE_OFFSET
#define VMALLOC_START		(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#else
#define MODULES_VADDR		VMALLOC_START
#define MODULES_END		VMALLOC_END
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS		(pgtable_l5_enabled ? \
				VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
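/*
 * Worked example (editorial, assuming Sv39, 4KB pages, and a 64-byte
 * struct page, i.e. STRUCT_PAGE_MAX_SHIFT == 6): VMEMMAP_SHIFT is
 * 39 - 12 - 1 + 6 = 32, so VMEMMAP_SIZE is 4GB, enough for one struct
 * page per page frame in half of the 39-bit address space.
 */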

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
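/*
 * Editorial note: the subtraction biases the array by the PFN of the
 * first byte of RAM, so CONFIG_SPARSEMEM_VMEMMAP's pfn_to_page(), which
 * evaluates to vmemmap + pfn, maps the first page of RAM to the struct
 * page stored at VMEMMAP_START.
 */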

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_32M
#define XIP_OFFSET_MASK		(SZ_32M - 1)
#else
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
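/*
 * Editorial note on the macro above: a RISC-V PTE keeps flag bits below
 * the physical page number and attribute bits (e.g. Svpbmt, NAPOT) above
 * it, so extracting a PFN must mask before shifting rather than shift
 * alone.
 */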

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define VA_USER_SV39	(UL(1) << (VA_BITS_SV39 - 1))
#define VA_USER_SV48	(UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57	(UL(1) << (VA_BITS_SV57 - 1))

#define MMAP_VA_BITS_64		((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64	(VA_BITS_SV39)
#define MMAP_VA_BITS		(is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS	(is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= CONFIG_XIP_PHYS_ADDR &&					\
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?			\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET : \
		__a;							\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
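
/*
 * Editorial sketch of the intent: in an execute-in-place kernel, data the
 * linker placed inside the flash image actually lives in RAM at runtime.
 * Addresses inside the 2 * XIP_OFFSET window above CONFIG_XIP_PHYS_ADDR
 * are rebased onto CONFIG_PHYS_RAM_BASE; all other addresses pass through
 * unchanged.
 */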
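
/*
 * Editorial note (an assumption based on the early boot code under
 * arch/riscv/mm): pt_ops abstracts page-table allocation during boot, so
 * fixmap-based early allocators can later be swapped for the normal
 * memblock/page allocators without changing the mapping code.
 */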
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#include <asm/cpufeature.h>

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
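/*
 * Worked example (editorial, assuming _PAGE_PFN_SHIFT == 10): for an
 * order-4 NAPOT mapping (16 contiguous 4KB pages, i.e. 64KB), pos is 13,
 * so the low four PFN bits are cleared and bit pfn[3] is set, producing
 * the pfn[3:0] == 0b1000 pattern that the Svnapot extension expects.
 */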

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

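	/*
	 * Editorial note: a NAPOT PTE encodes its size in the low PFN bits
	 * (see pte_mknapot() above), so clearing the lowest set bit of the
	 * PFN recovers the base PFN of the contiguous range.
	 */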
	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)	(pte_napot(pte) ?			\
					napot_cont_size(napot_cont_order(pte)) :\
					PAGE_SIZE)
#endif

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}

void flush_icache_pte(struct mm_struct *mm, pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(mm, pteval);

	set_pte(ptep, pteval);
}

#define PFN_PTE_SHIFT		_PAGE_PFN_SHIFT

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes
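
/*
 * Editorial note: since the PFN field starts at _PAGE_PFN_SHIFT, adding
 * 1 << _PAGE_PFN_SHIFT to the raw PTE value advances the mapping by one
 * page, which is how set_ptes() derives nr consecutive entries from a
 * single template PTE.
 */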

static inline void pte_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
				     pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}
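
/*
 * Editorial note: the atomic exchange both reads the old PTE and zeroes
 * the slot in one step, so a concurrent hardware update of the accessed
 * or dirty bit cannot be lost between the read and the clear.
 */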

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bits      1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })
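/*
 * Worked example (editorial): __swp_entry(1, 0x10) places type 1 in bits
 * 7-11 and offset 0x10 in bits 12 and up, giving a raw value of
 * (1 << 7) | (0x10 << 12) == 0x10080; __swp_type() and __swp_offset()
 * invert the packing.
 */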

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * - 0x9fc00000		 (~2.5GB) for RV32.
 * - 0x4000000000	 ( 256GB) for RV64 using SV39 mmu
 * - 0x800000000000	 ( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000	 (  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64 bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63–57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
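/*
 * Worked example (editorial, Sv39 with 4KB pages): PGDIR_SIZE is 1GB and
 * PTRS_PER_PGD is 512, so TASK_SIZE_64 = 1GB * 512 / 2 = 256GB
 * (0x4000000000), matching the table above.
 */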
#define TASK_SIZE_MAX	LONG_MAX

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		_AC(-1, UL)
#define VMALLOC_START		_AC(0, UL)
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */