/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLER__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */

#endif /* __ASSEMBLER__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLER__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif /* __ASSEMBLER__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
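
/*
 * Illustrative sketch (editor's note, not part of the kernel source): with
 * the classic 4K-page, 32-bit-PTE configuration (PTE_INDEX_SIZE == 10, so
 * PGDIR_SHIFT == 22), a 32-bit virtual address splits as follows:
 *
 *	unsigned long va  = 0xc0123456;			// hypothetical address
 *	unsigned int  pgd = va >> PGDIR_SHIFT;		// top-level index, 0x300
 *	unsigned int  pte = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); // 0x123
 *	unsigned long off = va & (PAGE_SIZE - 1);	// byte offset, 0x456
 *
 * PGDIR_MASK then isolates the region one pgdir entry covers: here
 * va & PGDIR_MASK == 0xc0000000, a PGDIR_SIZE (4MB) aligned chunk.
 */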

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08llx.\n", __FILE__, __LINE__, (unsigned long long)pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from which we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */

#define FIXADDR_SIZE	0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP	(KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif
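
/*
 * Illustrative sketch (editor's note): conceptually, before mem_init() the
 * early ioremap path carves mappings downward from IOREMAP_TOP, roughly:
 *
 *	ioremap_bot -= aligned_size;		// grow down from IOREMAP_TOP
 *	map_phys_at(pa, ioremap_bot, size);	// hypothetical helper name
 *
 * After mem_init(), ioremap_bot stops moving and doubles as the ceiling of
 * the vmalloc/ioremap space (see VMALLOC_END below).
 */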

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
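
/*
 * Worked example (editor's note, hypothetical value): without PPC_PIN_SIZE
 * and with high_memory == 0x30000000 (768MB of lowmem):
 *
 *	VMALLOC_START == (0x30000000 + 0x1000000) & ~0x0ffffff
 *	              == 0x31000000
 *
 * i.e. vmalloc space starts 16MB above the end of lowmem.  For an unaligned
 * high_memory the expression lands on the first 16MB boundary strictly
 * above it, so the hole is at most 16MB.
 */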

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_PPC_85xx)
#include <asm/nohash/pte-e500.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (i.e. the PFN is naturally aligned).
 * Platforms that don't use the natural alignment pre-define the value
 * themselves, so we don't override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
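
/*
 * Illustrative sketch (editor's note, hypothetical values): extracting the
 * physical frame number from a PTE value with the masks above, assuming
 * CONFIG_PTE_64BIT (36-bit physical addressing) and 4K pages:
 *
 *	unsigned long long pte = 0x0000000123456047ULL;	// RPN + flag bits
 *	unsigned long long pfn = (pte & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
 *	// pfn == 0x123456; the low 12 bits held the PTE flag bits
 *
 * The ULL mask is what keeps RPN bits above bit 31 from being truncated.
 */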

#ifndef __ASSEMBLER__

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((const void *)((unsigned long)pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_pfn(pmd)		(__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif

#define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))
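
/*
 * Illustrative sketch (editor's note): how a software walker might go from
 * a pmd entry to the pte for an address on Book E, using the helpers above
 * (pte_index() comes from the generic pgtable headers):
 *
 *	if (pmd_present(pmd) && !pmd_bad(pmd)) {
 *		pte_t *ptep = (pte_t *)pmd_page_vaddr(pmd) + pte_index(addr);
 *		...
 *	}
 *
 * On Book E the pmd holds the pte page's kernel virtual address, which is
 * why its pmd_pfn() variant above needs the __pa() conversion.
 */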

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32bit PTEs):
 *
 *                         1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <------------------ offset -------------------> < type -> E 0 0
 *
 * E is the exclusive marker that is not stored in swap entries.
 *
 * For 64bit PTEs, the offset is extended by 32bit.
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

/* We borrow LSB 2 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x000004
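
/*
 * Worked example (editor's note, hypothetical values): round-tripping a
 * swap entry of type 3 at offset 0x1234 through the encoding above:
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);	// e.val == 0x24683
 *	pte_t pte     = __swp_entry_to_pte(e);	// pte_val(pte) == 0x123418
 *
 * Decoding reverses it: pte_val(pte) >> 3 recovers 0x24683, whose low 5
 * bits are the type (3) and whose remainder is the offset (0x1234). The
 * "<< 3" keeps the three low PTE bits clear, matching the "E 0 0" field
 * in the diagram, so the exclusive marker (bit 2, 0x4) never collides
 * with the type/offset payload.
 */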

#endif /* !__ASSEMBLER__ */

#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */