/* Source: Linux v5.3, arch/nds32/include/asm/pgalloc.h (72 lines, 1.7 kB) */
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation

/*
 * Page-table allocation primitives for nds32.
 *
 * nds32 uses a two-level page table (pgd -> pte), so the pmd level is
 * folded: the pmd helpers below are stubs, and only the pgd and pte
 * levels are actually allocated.
 */

#ifndef _ASMNDS32_PGALLOC_H
#define _ASMNDS32_PGALLOC_H

#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/* Opt out of the generic pte_alloc_one(); a custom one is defined below. */
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

/*
 * Since we have only two-level page tables, these are trivial.
 *
 * pmd_alloc_one() must never be called on a folded pmd level, hence
 * BUG(); the (pmd_t *)2 return value only exists to give the macro a
 * well-typed (never reached) result.
 */
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd)		do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
#define pmd_pgtable(pmd)		pmd_page(pmd)

/* pgd level is managed out of line (see the arch's mm code). */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);

/* No page-table cache on this arch: nothing to trim. */
#define check_pgt_cache()		do { } while (0)

/*
 * Allocate one user pte page via the generic helper, then write back
 * the D-cache lines covering it.
 *
 * NOTE(review): the writeback is presumably needed so the physically
 * addressed hardware/software table walker sees the freshly initialized
 * entries — confirm against the arch's cache model.
 */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	pgtable_t pte;

	pte = __pte_alloc_one(mm, GFP_PGTABLE_USER);
	if (pte)
		cpu_dcache_wb_page((unsigned long)page_address(pte));

	return pte;
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 *
 * Ensure that we always set both PMD entries.
 *
 * Kernel-mapping variant: only valid for init_mm (enforced by the
 * BUG_ON below); user page tables go through pmd_populate() instead.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmdp, pte_t * ptep)
{
	unsigned long pte_ptr = (unsigned long)ptep;
	unsigned long pmdval;

	BUG_ON(mm != &init_mm);

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 */
	pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
	set_pmd(pmdp, __pmd(pmdval));
}

/*
 * User-space variant of the above: takes the pte as a struct page
 * (pgtable_t) and tags the entry with _PAGE_USER_TABLE.  Must NOT be
 * used for init_mm (enforced by the BUG_ON).
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmdp, pgtable_t ptep)
{
	unsigned long pmdval;

	BUG_ON(mm == &init_mm);

	/* Convert the pte page to its physical address, then tag it. */
	pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
	set_pmd(pmdp, __pmd(pmdval));
}

#endif