/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/pgtable-nopud.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END 0xff800000UL
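/*
 * Worked example (illustrative only; the actual value of high_memory is
 * platform dependent): with high_memory == 0xcf800000 (already 8MB
 * aligned), VMALLOC_START becomes
 * (0xcf800000 + 0x00800000) & ~0x007fffff == 0xd0000000, leaving exactly
 * the 8MB hole; for unaligned high_memory the rounding shrinks the hole
 * accordingly.
 */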

#define LIBRARY_TEXT_START 0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at. This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS (PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture-dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel

#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits) \
        __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_device(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
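/*
 * Usage sketch (illustrative, not part of this header): a driver mmap
 * handler typically applies one of the pgprot_* modifiers above to
 * vma->vm_page_prot before installing the mapping. "my_dev_mmap" and
 * "pfn" are hypothetical names; error handling is elided.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = ...;	(physical frame to expose)
 *
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */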

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version. These get translated into the best that the
 * architecture can perform. Note that on most ARM hardware:
 * 1) We cannot do execute protection
 * 2) If we could do execute protection, then read is implied
 * 3) Write implies read permission
 */

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))

#define pmd_none(pmd) (!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
        /*
         * The (s32) cast sign-extends PAGE_MASK, so that with LPAE's
         * 64-bit table entries the physical address bits above bit 31
         * survive the masking.
         */
        return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
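/*
 * Example (illustrative): generic fault-handling code builds and installs
 * a PTE roughly as below, using the pte_mk* helpers defined further down.
 * "page", "vma", "address" and "ptep" stand in for the caller's state.
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */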

/*
 * pte_isset() tests bits in a Linux PTE. If the tested value does not
 * fit in 32 bits (possible with 64-bit PTEs under LPAE), the result is
 * collapsed with !! so it cannot be truncated when used as an int.
 */
#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
                                                 : !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))

#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte) (pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte) \
        (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

static inline bool pte_access_permitted(pte_t pte, bool write)
{
        pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
        pteval_t needed = mask;

        /*
         * "needed" is captured before L_PTE_RDONLY is added to the mask,
         * so a write is only permitted when the read-only bit is clear.
         */
        if (write)
                mask |= L_PTE_RDONLY;

        return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

void set_pte_at(struct mm_struct *mm, unsigned long addr,
                pte_t *ptep, pte_t pteval);

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
        pte_val(pte) &= ~pgprot_val(prot);
        return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
        pte_val(pte) |= pgprot_val(prot);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
        return set_pte_bit(pte, __pgprot(L_PTE_XN));
}
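/*
 * Note that the helpers above are pure: they return a modified copy of
 * the PTE and do not touch the page tables themselves. An illustrative
 * write-protect sequence ("ptep" and "addr" are hypothetical):
 *
 *	pte_t old = *ptep;
 *	set_pte_at(mm, addr, ptep, pte_wrprotect(old));
 */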

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
                              L_PTE_NONE | L_PTE_VALID;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
}

/*
 * Encode and decode a swap entry. Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file. Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) __pte((swp).val | PTE_TYPE_FAULT)
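/*
 * Worked example (illustrative): __swp_entry(3, 0x1000) encodes to
 * (3 << 2) | (0x1000 << 7) == 0x0008000c; __swp_type() then recovers 3
 * and __swp_offset() recovers 0x1000. Bits 1:0 (including L_PTE_PRESENT)
 * stay zero, so a swap entry is never mistaken for a present PTE.
 */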

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch-dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */