/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

/*
 * Page table definitions for Qualcomm Hexagon processor.
 */
#include <asm/page.h>
#include <asm-generic/pgtable-nopmd.h>

/* A handy thing to have if one has the RAM. Declared in head.S */
extern unsigned long empty_zero_page;

/*
 * The PTE model described here is that of the Hexagon Virtual Machine,
 * which autonomously walks 2-level page tables. At a lower level, we
 * also describe the RISCish software-loaded TLB entry structure of
 * the underlying Hexagon processor. A kernel built to run on the
 * virtual machine has no need to know about the underlying hardware.
 */
#include <asm/vm_mmu.h>

/*
 * To maximize the comfort level for the PTE manipulation macros,
 * define the "well known" architecture-specific bits.
 */
#define _PAGE_READ __HVM_PTE_R
#define _PAGE_WRITE __HVM_PTE_W
#define _PAGE_EXECUTE __HVM_PTE_X
#define _PAGE_USER __HVM_PTE_U

/*
 * We have a total of 4 "soft" bits available in the abstract PTE.
 * The two mandatory software bits are Dirty and Accessed.
 * To make nonlinear swap work according to the more recent
 * model, we want a low order "Present" bit to indicate whether
 * the PTE describes MMU programming or swap space.
 */
#define _PAGE_PRESENT (1<<0)
#define _PAGE_DIRTY (1<<1)
#define _PAGE_ACCESSED (1<<2)

/*
 * For now, let's say that Valid and Present are the same thing.
 * Alternatively, we could say that it's the "or" of R, W, and X
 * permissions.
 */
#define _PAGE_VALID _PAGE_PRESENT

/*
 * We're not defining _PAGE_GLOBAL here, since there's no concept
 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 * and we want to use the same page table structures and macros in
 * the native kernel as we do in the virtual machine kernel.
 * So we'll put up with a bit of inefficiency for now...
 */

/*
 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 * only the second from the bottom, pgd and pud both being collapsed.
 * Each entry represents 4MB of virtual address space, 4K of table
 * thus maps the full 4GB.
 */
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#ifdef CONFIG_PAGE_SIZE_4KB
#define PTRS_PER_PTE 1024
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PTRS_PER_PTE 256
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PTRS_PER_PTE 64
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PTRS_PER_PTE 16
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PTRS_PER_PTE 4
#endif
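
/*
 * Worked example of the geometry above: each PGD entry covers
 * PGDIR_SIZE = 1 << 22 = 4MB of virtual address space, so the
 * PTRS_PER_PGD = 1024 entries of the 4K table span 1024 * 4MB = 4GB.
 * PTRS_PER_PTE is simply PGDIR_SIZE / PAGE_SIZE for the configured
 * page size: 4MB / 4KB = 1024, 4MB / 16KB = 256, 4MB / 64KB = 64,
 * 4MB / 256KB = 16 and 4MB / 1MB = 4, matching the values above.
 */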

/* Any bigger and the PTE disappears. */
#define pgd_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
                pgd_val(e))

/*
 * Page Protection Constants. Includes (in this variant) cache attributes.
 */
extern unsigned long _dflt_cache_att;

#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _dflt_cache_att)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY PAGE_READONLY
#define PAGE_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                                _PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY_EXEC PAGE_EXEC
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                                _PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                _PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)

/*
 * Aliases for mapping mmap() protection bits to page protections.
 * These get used for static initialization, so using the _dflt_cache_att
 * variable for the default cache attribute isn't workable. If the
 * default gets changed at boot time, the boot option code has to
 * update data structures like the protection_map[] array.
 */
#define CACHEDEF (CACHE_DEFAULT << 6)

/* Private (copy-on-write) page protections. */
#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
#define __P010 __P000 /* Write-only copy-on-write */
#define __P011 __P001 /* Read/Write copy-on-write */
#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | CACHEDEF)
#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
                        _PAGE_READ | CACHEDEF)
#define __P110 __P100 /* Write/execute copy-on-write */
#define __P111 __P101 /* Read/Write/Execute, copy-on-write */

/* Shared page protections. */
#define __S000 __P000
#define __S001 __P001
#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_WRITE | CACHEDEF)
#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                        _PAGE_WRITE | CACHEDEF)
#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | CACHEDEF)
#define __S101 __P101
#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
                        _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
                        _PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
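
/*
 * Reading the two tables above as mmap() prot bit combinations (the
 * three digits are exec/write/read): a private PROT_READ|PROT_WRITE
 * mapping selects __P011, which aliases to the read-only __P001 so the
 * first write faults and the page can be copied (copy-on-write), while
 * the shared __S011 variant really does carry _PAGE_WRITE. This is just
 * an illustration of how the generic mm code indexes these macros
 * through the protection_map[] array mentioned above.
 */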

extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */

/* HUGETLB not working currently */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
#endif

/*
 * For now, assume that higher-level code will do TLB/MMU invalidations
 * and don't insert that overhead into this low-level function.
 */
extern void sync_icache_dcache(pte_t pte);

#define pte_present_exec_user(pte) \
        ((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
        (_PAGE_EXECUTE | _PAGE_USER))

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        /* should really be using pte_exec, if it weren't declared later. */
        if (pte_present_exec_user(pteval))
                sync_icache_dcache(pteval);

        *ptep = pteval;
}

/*
 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
 * (Linux PTE), the key is to have bits 11..9 all zero. We'd use 0x7
 * as a universal null entry, but some of those least significant bits
 * are interpreted by software.
 */
#define _NULL_PMD 0x7
#define _NULL_PTE 0x0

static inline void pmd_clear(pmd_t *pmd_entry_ptr)
{
        pmd_val(*pmd_entry_ptr) = _NULL_PMD;
}

/*
 * Conveniently, a null PTE value is invalid.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        pte_val(*ptep) = _NULL_PTE;
}

/**
 * pmd_none - check if a pmd entry is empty (not mapped)
 * @pmd: pmd entry
 *
 * MIPS checks it against that "invalid pte table" thing.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == _NULL_PMD;
}

/**
 * pmd_present - is there a page table behind this?
 * Essentially the inverse of pmd_none. We may save an inline
 * instruction by defining it this way, instead of simply
 * "!pmd_none".
 */
static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long)_NULL_PMD;
}

/**
 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
 * As we have no known cause of badness, it's null, as it is for many
 * architectures.
 */
static inline int pmd_bad(pmd_t pmd)
{
        return 0;
}

/*
 * pmd_page - converts a PMD entry to a page pointer
 */
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

/**
 * pte_none - check if a pte is empty (not mapped)
 * @pte: pte_t entry
 */
static inline int pte_none(pte_t pte)
{
        return pte_val(pte) == _NULL_PTE;
}

/*
 * pte_present - check if page is present
 */
static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

/* mk_pte - make a PTE out of a page pointer and protection bits */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))

/* pte_mkold - mark PTE as not recently accessed */
static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_ACCESSED;
        return pte;
}

/* pte_mkyoung - mark PTE as recently accessed */
static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        return pte;
}

/* pte_mkclean - mark page as in sync with backing store */
static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        return pte;
}

/* pte_mkdirty - mark page as modified */
static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

/* pte_young - "is PTE marked as accessed"? */
static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

/* pte_dirty - "is PTE dirty?" */
static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_DIRTY;
}

/* pte_modify - set protection bits on PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
        pte_val(pte) &= PAGE_MASK;
        pte_val(pte) |= pgprot_val(prot);
        return pte;
}
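
/*
 * For illustration, callers such as the mprotect path typically change a
 * PTE's permissions with something like
 *
 *      pte = pte_modify(pte, PAGE_READONLY);
 *
 * which keeps the page frame number (the bits selected by PAGE_MASK) and
 * replaces the low-order permission/cache bits with those of the new
 * pgprot.
 */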

/* pte_wrprotect - mark page as not writable */
static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        return pte;
}

/* pte_mkwrite - mark page as writable */
static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        return pte;
}

/* pte_mkexec - mark PTE as executable */
static inline pte_t pte_mkexec(pte_t pte)
{
        pte_val(pte) |= _PAGE_EXECUTE;
        return pte;
}

/* pte_read - "is PTE marked as readable?" */
static inline int pte_read(pte_t pte)
{
        return pte_val(pte) & _PAGE_READ;
}

/* pte_write - "is PTE marked as writable?" */
static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_WRITE;
}

/* pte_exec - "is PTE marked as executable?" */
static inline int pte_exec(pte_t pte)
{
        return pte_val(pte) & _PAGE_EXECUTE;
}

/* __pte_to_swp_entry - extract swap entry from PTE */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })

/* __swp_entry_to_pte - extract PTE from swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/* pfn_pte - convert page number and protection value to page table entry */
#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))

/* pte_pfn - convert pte to page frame number */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

/*
 * set_pte_at - update page table and do whatever magic may be
 * necessary to make the underlying hardware/firmware take note.
 *
 * VM may require a virtual instruction to alert the MMU.
 */
#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
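
/*
 * Typical use by the generic mm code, shown for illustration only (vma,
 * addr, ptep and page come from the caller's context, not this header):
 *
 *      pte_t pte = mk_pte(page, vma->vm_page_prot);
 *      set_pte_at(vma->vm_mm, addr, ptep, pte);
 *
 * On Hexagon this reduces to set_pte(), which calls sync_icache_dcache()
 * for executable user mappings before the new entry is stored.
 */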

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
}

/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
 * interpreted as swap information. The remaining free bits are interpreted as
 * swap type/offset tuple. Rather than have the TLB fill handler test
 * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
 * all zeros for swap entries, which speeds up the miss handler at the cost of
 * 3 bits of offset. That trade-off can be revisited if necessary, but Hexagon
 * processor architecture and target applications suggest a lot of TLB misses
 * and not much swap space.
 *
 * Format of swap PTE:
 *      bit  0:     Present (zero)
 *      bits 1-5:   swap type (arch independent layer uses 5 bits max)
 *      bits 6-9:   bits 3:0 of offset
 *      bits 10-12: effectively _PAGE_PROTNONE (all zero)
 *      bits 13-31: bits 22:4 of swap offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
 */

/* Used for swap PTEs */
#define __swp_type(swp_pte) (((swp_pte).val >> 1) & 0x1f)

#define __swp_offset(swp_pte) \
        ((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))

#define __swp_entry(type, offset) \
        ((swp_entry_t) { \
                ((type << 1) | \
                ((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
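
/*
 * Worked example of the encoding above, with illustrative values only:
 * __swp_entry(2, 0x123) packs type 2 into bits 5:1 and splits the offset
 * 0x123 as (0x3 << 6) | (0x120 << 9), giving the value 0x240c4. Decoding,
 * __swp_type() returns (0x240c4 >> 1) & 0x1f = 2, and __swp_offset()
 * returns ((0x240c4 >> 6) & 0xf) | ((0x240c4 >> 9) & 0x7ffff0)
 * = 0x3 | 0x120 = 0x123, so the round trip is lossless and bit 0
 * (_PAGE_PRESENT) stays zero, as the format requires.
 */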

#endif /* _ASM_PGTABLE_H */