/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/sbi.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
				     p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd_safe(pgd,
			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long addr)
{
	if (pgtable_l4_enabled)
		tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long addr)
{
	if (pgtable_l5_enabled)
		tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */

static inline void sync_kernel_mappings(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD,
	       init_mm.pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = __pgd_alloc(mm, 0);
	if (likely(pgd != NULL)) {
		/* Copy kernel mappings */
		sync_kernel_mappings(pgd);
	}
	return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long addr)
{
	tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}

#endif /* __PAGETABLE_PMD_FOLDED */

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */