/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/page.h>
#include <linux/mm_types.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEEDSYNC	0x002
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x400

#if CONFIG_PGTABLE_LEVELS == 4
#include <asm/pgtable-4level.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm/pgtable-2level.h>
#else
#error "Unsupported number of page table levels"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END	(TASK_SIZE-2*PAGE_SIZE)
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
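
/*
 * Worked example with hypothetical numbers, assuming __va_space is 8MB
 * (0x800000) and end_iomem happens to be 0x12345000:
 *
 *	VMALLOC_START = (0x12345000 + 0x800000) & ~0x7fffff
 *		      = 0x12b45000 & 0xff800000		(as a 32-bit value)
 *		      = 0x12800000
 *
 * i.e. the vmalloc area begins at the first VMALLOC_OFFSET-aligned address
 * above end_iomem, so the "hole" described above is at most 8MB.
 */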

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
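
/*
 * Note that PAGE_COPY and PAGE_READONLY expand to the same set of bits:
 * private writable mappings start life without _PAGE_RW, so the first
 * write faults and copy-on-write can be performed.
 */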

/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read. Also, write permissions imply read permissions.
 * This is the closest we can get.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEEDSYNC; } while (0)

#define pmd_needsync(x)	(pmd_val(x) & _PAGE_NEEDSYNC)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEEDSYNC)

#define pud_needsync(x)	(pud_val(x) & _PAGE_NEEDSYNC)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEEDSYNC)

#define p4d_needsync(x)	(p4d_val(x) & _PAGE_NEEDSYNC)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEEDSYNC)

#define pmd_pfn(pmd)	(pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)	phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_USER) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_RW) &&
	       !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_needsync(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEEDSYNC);
}
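
/*
 * As a minimal illustration of how the accessors above combine (a sketch;
 * the helper name is hypothetical and not part of this header's API): a
 * present, user-readable but non-writable PTE is the shape a private
 * mapping has before copy-on-write.
 */
static inline int example_pte_is_cow_candidate(pte_t pte)
{
	return pte_present(pte) && pte_read(pte) && !pte_write(pte);
}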

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return pte;
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEEDSYNC);
	return pte;
}

static inline pte_t pte_mkneedsync(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEEDSYNC);
	return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/*
	 * If it's a swap entry, it needs to be marked _PAGE_NEEDSYNC so
	 * update_pte_range knows to unmap it.
	 */
	*pteptr = pte_mkneedsync(*pteptr);
}

#define PFN_PTE_SHIFT		PAGE_SHIFT

static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
				    unsigned long end)
{
	if (!mm->context.sync_tlb_range_to) {
		mm->context.sync_tlb_range_from = start;
		mm->context.sync_tlb_range_to = end;
	} else {
		if (start < mm->context.sync_tlb_range_from)
			mm->context.sync_tlb_range_from = start;
		if (end > mm->context.sync_tlb_range_to)
			mm->context.sync_tlb_range_to = end;
	}
}
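
/*
 * For example, if the pending range is [0x1000, 0x3000) and
 * um_tlb_mark_sync() is called for [0x2000, 0x5000), the pending range
 * grows to the union [0x1000, 0x5000). Disjoint ranges are merged the
 * same way, so everything in between gets synced as well.
 */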

#define set_ptes set_ptes
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, int nr)
{
	/* Basically the default implementation */
	size_t length = nr * PAGE_SIZE;

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		/* advance the PTE by exactly one page frame per iteration */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}

	um_tlb_mark_sync(mm, addr, addr + length);
}
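
/*
 * For example, set_ptes(mm, addr, ptep, pfn_pte(pfn, prot), 3) installs
 * PTEs for pfn, pfn + 1 and pfn + 2 into ptep[0..2], then marks
 * [addr, addr + 3 * PAGE_SIZE) for synchronization with the host.
 */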

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
}
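
/*
 * Since set_pte() unconditionally sets _PAGE_NEEDSYNC, two PTEs that differ
 * only in that bit must still compare equal; hence the ~_PAGE_NEEDSYNC mask
 * above.
 */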

#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;

	pte_set_val(pte, pfn_to_phys(pfn), pgprot);

	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}
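
/*
 * pte_modify() is what e.g. mprotect() ends up calling through the generic
 * mm code: _PAGE_CHG_MASK keeps the page frame number and the accessed and
 * dirty bits, while every other protection bit is taken from newprot.
 */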

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD].
 *
 * This macro returns the kernel virtual address of the page table page
 * that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma, address, ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 * _PAGE_NEEDSYNC (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x)		(((x).val >> 5) & 0x1f)
#define __swp_offset(x)		((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
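
/*
 * Worked example: __swp_entry(3, 0x1234) yields
 *
 *	((3 & 0x1f) << 5) | (0x1234 << 11) = 0x60 | 0x91a000 = 0x91a060
 *
 * and decoding recovers both fields:
 *
 *	__swp_type:   (0x91a060 >> 5) & 0x1f = 3
 *	__swp_offset:  0x91a060 >> 11        = 0x1234
 */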

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
	return pte;
}

#endif /* __UM_PGTABLE_H */