/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/* Note due to the way vm flags are laid out, the bits are XWR */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X

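/*
 * Illustrative note (editorial, not part of the upstream header): the
 * generic mm code builds protection_map[16] from __P000..__S111 and
 * indexes it with the low vm_flags bits, roughly:
 *
 *	prot = protection_map[vm_flags &
 *			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *
 * so a private read+exec mapping (VM_READ | VM_EXEC) resolves to __P101,
 * i.e. PAGE_READONLY_X, while the shared equivalent resolves to __S101.
 * The digits in the macro names are the X, W and R bits, as the comment
 * above notes.
 */
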
#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}

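/*
 * Usage sketch (editorial, not from upstream): pte_pgprot() is useful when
 * a PTE has to be rebuilt for a different pfn while keeping its protection
 * and attribute bits, e.g. with a hypothetical new_pfn:
 *
 *	pte_t old = *ptep;
 *	pte_t new = pfn_pte(new_pfn, pte_pgprot(old));
 *
 * and mk_pte()/pte_page() convert between struct page and PTE form:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	struct page *p = pte_page(pte);
 *
 * where p ends up pointing back at page.
 */
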
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

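/*
 * Illustrative note (editorial, not from upstream): read faults on
 * never-written private anonymous memory can be backed by this page
 * instead of allocating a new one, along the lines of:
 *
 *	entry = pfn_pte(page_to_pfn(ZERO_PAGE(address)), vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *
 * Note that the vaddr argument is ignored: there is a single shared zero
 * page for all addresses.
 */
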
extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

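/*
 * Illustrative call pattern (editorial, not from upstream): the generic
 * fault code invokes this right after installing a new PTE, e.g.:
 *
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, ptep);
 *
 * On hash-MMU Book3S this preloads the corresponding HPTE; elsewhere it
 * mainly keeps the i-cache and d-cache coherent for the new mapping.
 */
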
extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write,
		       struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

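/*
 * Editorial usage sketch (not from upstream): translate an address inside
 * a vmalloc'd buffer to its physical address, e.g. for code that cannot
 * go through the vmalloc mapping (buf and offset are hypothetical):
 *
 *	phys_addr_t pa = vmalloc_to_phys(buf + offset);
 */
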
void pgtable_cache_add(unsigned int shift);
void pgtable_cache_init(void);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

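/*
 * Illustrative sketch (editorial, not from upstream) of how a PTE-fragment
 * allocator can use the accessors above: the mm context caches the current
 * partially consumed page so that up to PTE_FRAG_NR page-table fragments
 * are carved out of one page before another page is allocated, roughly:
 *
 *	void *frag = pte_frag_get(&mm->context);
 *	if (!frag)
 *		frag = alloc_fresh_pte_page(mm);		(hypothetical helper)
 *	pte_frag_set(&mm->context, next_fragment(frag));	(hypothetical helper)
 *
 * With the fallback definitions above (PTE_FRAG_NR == 1, PTE_FRAG_SIZE ==
 * PAGE_SIZE) the fragment cache is a no-op and every PTE table gets a
 * full page.
 */
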
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */