/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif
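
/*
 * Illustrative sketch (not part of this header's API): a routine that
 * maps the kernel image would pass PAGE_KERNEL_TEXT as the protection,
 * e.g. via powerpc's map_kernel_page():
 *
 *	map_kernel_page(va, pa, PAGE_KERNEL_TEXT);
 *
 * With any of the debug/patching options above enabled this resolves to
 * a writable+executable protection so breakpoints and patched code can
 * be written into text; otherwise text is mapped read-only-executable.
 */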

/* Make module code happy; we don't set it read-only yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
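
/*
 * Hedged usage sketch: set_ptes() installs @nr consecutive PTEs for the
 * @nr pages of one folio, starting at @addr, with the pfn advancing by
 * one for each subsequent page. For example, mapping a 4-page folio:
 *
 *	set_ptes(mm, addr, ptep, mk_pte(&folio->page, prot), 4);
 */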

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
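
/*
 * Since pte_pfn() and pte_pgprot() select disjoint halves of the PTE,
 * a PTE can be reassembled from the pair (illustrative identity, on the
 * assumption that the pfn lives entirely inside PTE_RPN_MASK):
 *
 *	pte_t copy = pfn_pte(pte_pfn(pte), pte_pgprot(pte));
 *
 * after which pte_val(copy) == pte_val(pte).
 */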

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
}
#define pgprot_nx pgprot_nx
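
/*
 * Example (sketch): pgprot_nx() strips execute permission from an
 * otherwise-unchanged protection, e.g. for a mapping that holds data
 * which must never be run:
 *
 *	pgprot_t prot = pgprot_nx(PAGE_KERNEL_X);
 *
 * leaving prot equivalent to PAGE_KERNEL_X minus its execute bit.
 */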

#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
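
/*
 * Hedged example: the fault path can back a read of a not-yet-written
 * anonymous mapping with this page instead of allocating memory:
 *
 *	struct page *zp = ZERO_PAGE(vmf->address);
 */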

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif
/* Resolve a vmalloc address to its physical address; usable e.g. from KVM */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty);

pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot);

struct file;
static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
					    unsigned long size, pgprot_t vma_prot)
{
	return __phys_mem_access_prot(pfn, size, vma_prot);
}
#define __HAVE_PHYS_MEM_ACCESS_PROT

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
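	/*
	 * Only two configurations have preload work to do here: classic
	 * hash-MMU (when not running radix), and e500 with hugetlb pages.
	 */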
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}

/*
 * When PTE fragments are in use, PTE_FRAG_NR is defined by the subarch
 * pgtable.h, so it is guaranteed to be defined by the time we get here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
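
/*
 * Consumer pattern (hedged sketch, loosely following the PTE-fragment
 * allocator): take the cached fragment when one is available, otherwise
 * allocate a fresh page; alloc_fresh_pte_page() is hypothetical here.
 *
 *	void *frag = pte_frag_get(&mm->context);
 *	if (!frag)
 *		frag = alloc_fresh_pte_page(mm);
 *	pte_frag_set(&mm->context, frag + PTE_FRAG_SIZE);
 */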

#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return false;
}
#endif
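
/*
 * Hedged sketch of why the *_is_leaf() predicates exist: a page-table
 * walker stops descending when an entry maps a huge page directly,
 * since no PTE level exists below a leaf entry. handle_huge_mapping()
 * is hypothetical:
 *
 *	if (pmd_is_leaf(*pmdp))
 *		return handle_huge_mapping(pmdp);
 *	ptep = pte_offset_kernel(pmdp, addr);
 */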

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory() details some of
 * the restrictions. We don't check for PMD_SIZE because our vmemmap
 * allocation code can fall back correctly. The pageblock alignment
 * requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and 2M PMD_SIZE, things align naturally for
	 * memory block sizes of 128MB and up, so require PMD_SIZE alignment.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}
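
/*
 * Worked example of the alignment check above (assuming a 64-byte
 * struct page): a 128MB block with 4K pages holds 128M/4K = 32768
 * pages, whose memmap needs 32768 * 64B = 2MB, which is exactly
 * PMD_SIZE, so IS_ALIGNED() holds for 128MB and larger power-of-two
 * block sizes.
 */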

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */