Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v6.14 158 lines 3.9 kB view raw
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/sbi.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

/*
 * Platforms where riscv_use_sbi_for_rfence() returns false use an IPI to
 * perform TLB shootdown, while platforms where it returns true use SBI to
 * perform TLB shootdown. To keep software pagetable walkers safe in the SBI
 * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
 * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
 * for more details.
 */
static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	if (riscv_use_sbi_for_rfence()) {
		/* RCU-deferred free: walkers may still be traversing @pt */
		tlb_remove_ptdesc(tlb, pt);
	} else {
		/* IPI shootdown already quiesced walkers; free directly */
		pagetable_dtor(pt);
		tlb_remove_page_ptdesc(tlb, pt);
	}
}

/* Point @pmd at the kernel PTE table @pte (leaf-table linkage). */
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

/* Point @pmd at the user PTE table page @pte. */
static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
/* Point @pud at the PMD table @pmd. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

/*
 * Point @p4d at the PUD table @pud. No-op unless the PUD level actually
 * exists at runtime (pgtable_l4_enabled, i.e. Sv48 or deeper paging).
 */
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

/* As p4d_populate(), via set_p4d_safe() (warns on overwriting a present entry). */
static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

/*
 * Point @pgd at the P4D table @p4d. No-op unless the P4D level actually
 * exists at runtime (pgtable_l5_enabled, i.e. Sv57 paging).
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

/* As pgd_populate(), via set_pgd_safe() (warns on overwriting a present entry). */
static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
				     p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd_safe(pgd,
			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

/*
 * Override the generic pud_free() (see __HAVE_ARCH_PUD_FREE above): when the
 * PUD level is folded at runtime (!pgtable_l4_enabled) there is no separate
 * PUD page to free.
 */
#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

/* Queue a PUD table for TLB-gather free; no-op when PUD level is folded. */
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long addr)
{
	if (pgtable_l4_enabled)
		riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}

/* Queue a P4D table for TLB-gather free; no-op when P4D level is folded. */
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long addr)
{
	if (pgtable_l5_enabled)
		riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */

/* Copy the kernel-half PGD entries from init_mm into a fresh @pgd. */
static inline void sync_kernel_mappings(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD,
	       init_mm.pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

/*
 * Allocate a PGD for a new mm and seed it with the kernel mappings.
 * Returns NULL on allocation failure.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = __pgd_alloc(mm, 0);
	if (likely(pgd != NULL)) {
		/* Copy kernel mappings */
		sync_kernel_mappings(pgd);
	}
	return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED

/* Queue a PMD table for TLB-gather free. */
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long addr)
{
	riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}

#endif /* __PAGETABLE_PMD_FOLDED */

/* Queue a PTE table page for TLB-gather free. */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	riscv_tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */