/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *	       Ulrich Weigand (weigand@de.ibm.com)
 *	       Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/cpufeature.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
extern void paging_init(void);
extern struct ctlreg s390_invalid_asce;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
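
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * when a 1M direct-mapping segment is split into 4K pages, the counters
 * would be adjusted in pairs, since 1M / 4K = 256:
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 */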

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
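
/*
 * Example, assuming a hypothetical zero_page_mask of 0xff000 (i.e. a zero
 * area of 256 pages): ZERO_PAGE(0x12345000) masks the address to 0x45000
 * and thus returns the zero page with index 0x45, so that differently
 * colored user addresses map to differently colored zero pages.
 */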

/* TODO: s390 cannot support io_remap_pfn_range... */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *vmemmap;
extern unsigned long vmemmap_size;

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

#ifdef CONFIG_KMSAN
#define KMSAN_VMALLOC_SIZE		(VMALLOC_END - VMALLOC_START)
#define KMSAN_VMALLOC_SHADOW_START	VMALLOC_END
#define KMSAN_VMALLOC_SHADOW_END	(KMSAN_VMALLOC_SHADOW_START + KMSAN_VMALLOC_SIZE)
#define KMSAN_VMALLOC_ORIGIN_START	KMSAN_VMALLOC_SHADOW_END
#define KMSAN_VMALLOC_ORIGIN_END	(KMSAN_VMALLOC_ORIGIN_START + KMSAN_VMALLOC_SIZE)
#define KMSAN_MODULES_SHADOW_START	KMSAN_VMALLOC_ORIGIN_END
#define KMSAN_MODULES_SHADOW_END	(KMSAN_MODULES_SHADOW_START + MODULES_LEN)
#define KMSAN_MODULES_ORIGIN_START	KMSAN_MODULES_SHADOW_END
#define KMSAN_MODULES_ORIGIN_END	(KMSAN_MODULES_ORIGIN_START + MODULES_LEN)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define KASLR_LEN	(1UL << 31)
#else
#define KASLR_LEN	0UL
#endif

void setup_protection_map(void);

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	    P-table origin				      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	    S-table origin				|   TF	TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |	   region table origin			       |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100	/* HW no-execute bit */
#define _PAGE_PROTECT	0x200	/* HW read-only bit */
#define _PAGE_INVALID	0x400	/* HW invalid bit */
#define _PAGE_LARGE	0x800	/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001	/* SW pte present bit */
#define _PAGE_YOUNG	0x004	/* SW pte young bit */
#define _PAGE_DIRTY	0x008	/* SW pte dirty bit */
#define _PAGE_READ	0x010	/* SW pte read bit */
#define _PAGE_WRITE	0x020	/* SW pte write bit */
#define _PAGE_SPECIAL	0x040	/* SW associated with special page */
#define _PAGE_UNUSED	0x080	/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002	/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

#define _PAGE_SW_BITS	0xffUL	/* All SW bits */

#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE	/* SW pte exclusive swap bit */

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
			 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT
 * HW bit and all SW bits.
 */
#define _PAGE_RDP_MASK	~(_PAGE_PROTECT | _PAGE_SW_BITS)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
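
/*
 * Worked example for the table above: a present, read-write, dirty, young
 * pte has the SW bits p|r|w|d|y set and the HW bits I and R clear, i.e.
 * its low 12 bits are 0x03d. The predicates defined below then evaluate to:
 *
 *	(0x03d & 0x001) == 0x001	pte_present() -> true
 *	 0x03d != 0x400			pte_none()    -> false
 *	(0x03d & 0x201) != 0x200	pte_swap()    -> false
 */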

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH | \
				 _REGION3_ENTRY_PRESENT)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_HARDWARE_BITS		0xfffffffffffff6ffUL
#define _REGION3_ENTRY_HARDWARE_BITS_LARGE	0xffffffff8001073cUL
#define _REGION3_ENTRY_ORIGIN_LARGE	~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_COMM	0x0010	/* Common-Region, marks swap entry */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_WRITE	0x8000	/* SW region write bit */
#define _REGION3_ENTRY_READ	0x4000	/* SW region read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x0002 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/*
 * SW region present bit. For non-leaf region-third-table entries, bits 62-63
 * indicate the TABLE LENGTH and both must be set to 1. But such entries
 * would always be considered as present, so it is safe to use bit 63 as
 * PRESENT bit for PUD.
 */
#define _REGION3_ENTRY_PRESENT	0x0001

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe3fUL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe3cUL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff1073cUL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PRESENT)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */

#define _SEGMENT_ENTRY_COMM	0x0010	/* Common-Segment, marks swap entry */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x8000	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x4000	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0002 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _SEGMENT_ENTRY_PRESENT	0x0001	/* SW segment present bit */

/* Common bits in region and segment table entries, for swap entries */
#define _RST_ENTRY_COMM		0x0010	/* Common-Region/Segment, marks swap entry */
#define _RST_ENTRY_INVALID	0x0020	/* invalid region/segment table entry */

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
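
/*
 * Worked example for the table above: __SEGMENT_KERNEL (defined further
 * down) sets READ, WRITE, YOUNG and DIRTY while leaving the PROTECT and
 * INVALID hardware bits clear, which matches the
 * "read-write, dirty, young 11..0...0...11" row.
 */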

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_ST2_MASK	0x0000ffff00000000UL
#define PGSTE_UC_BIT	0x0000000000008000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000000000004000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000000000002000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define __PAGE_NONE	(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_RO	(_PAGE_PRESENT | _PAGE_READ | \
			 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_RX	(_PAGE_PRESENT | _PAGE_READ | \
			 _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_RW	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_RWX	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_INVALID | _PAGE_PROTECT)
#define __PAGE_SHARED	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define __PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define __PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
			  _PAGE_PROTECT | _PAGE_NOEXEC)

extern unsigned long page_noexec_mask;

#define __pgprot_page_mask(x)	__pgprot((x) & page_noexec_mask)

#define PAGE_NONE	__pgprot_page_mask(__PAGE_NONE)
#define PAGE_RO		__pgprot_page_mask(__PAGE_RO)
#define PAGE_RX		__pgprot_page_mask(__PAGE_RX)
#define PAGE_RW		__pgprot_page_mask(__PAGE_RW)
#define PAGE_RWX	__pgprot_page_mask(__PAGE_RWX)
#define PAGE_SHARED	__pgprot_page_mask(__PAGE_SHARED)
#define PAGE_KERNEL	__pgprot_page_mask(__PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot_page_mask(__PAGE_KERNEL_RO)

/*
 * Segment entry (large page) protection definitions.
 */
#define __SEGMENT_NONE	(_SEGMENT_ENTRY_PRESENT | \
			 _SEGMENT_ENTRY_INVALID | \
			 _SEGMENT_ENTRY_PROTECT)
#define __SEGMENT_RO	(_SEGMENT_ENTRY_PRESENT | \
			 _SEGMENT_ENTRY_PROTECT | \
			 _SEGMENT_ENTRY_READ | \
			 _SEGMENT_ENTRY_NOEXEC)
#define __SEGMENT_RX	(_SEGMENT_ENTRY_PRESENT | \
			 _SEGMENT_ENTRY_PROTECT | \
			 _SEGMENT_ENTRY_READ)
#define __SEGMENT_RW	(_SEGMENT_ENTRY_PRESENT | \
			 _SEGMENT_ENTRY_READ | \
			 _SEGMENT_ENTRY_WRITE | \
			 _SEGMENT_ENTRY_NOEXEC)
#define __SEGMENT_RWX	(_SEGMENT_ENTRY_PRESENT | \
			 _SEGMENT_ENTRY_READ | \
			 _SEGMENT_ENTRY_WRITE)
#define __SEGMENT_KERNEL	(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define __SEGMENT_KERNEL_RO	(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)

extern unsigned long segment_noexec_mask;

#define __pgprot_segment_mask(x)	__pgprot((x) & segment_noexec_mask)

#define SEGMENT_NONE	__pgprot_segment_mask(__SEGMENT_NONE)
#define SEGMENT_RO	__pgprot_segment_mask(__SEGMENT_RO)
#define SEGMENT_RX	__pgprot_segment_mask(__SEGMENT_RX)
#define SEGMENT_RW	__pgprot_segment_mask(__SEGMENT_RW)
#define SEGMENT_RWX	__pgprot_segment_mask(__SEGMENT_RWX)
#define SEGMENT_KERNEL	__pgprot_segment_mask(__SEGMENT_KERNEL)
#define SEGMENT_KERNEL_RO __pgprot_segment_mask(__SEGMENT_KERNEL_RO)

/*
 * Region3 entry (large page) protection definitions.
 */

#define __REGION3_KERNEL	(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_PRESENT | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define __REGION3_KERNEL_RO	(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_PRESENT | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION_ENTRY_PROTECT | \
				 _REGION_ENTRY_NOEXEC)

extern unsigned long region_noexec_mask;

#define __pgprot_region_mask(x)	__pgprot((x) & region_noexec_mask)

#define REGION3_KERNEL		__pgprot_region_mask(__REGION3_KERNEL)
#define REGION3_KERNEL_RO	__pgprot_region_mask(__REGION3_KERNEL_RO)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)
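
/*
 * Example: a newly created mm typically starts out with a three level
 * page table, i.e. context.asce_limit == _REGION2_SIZE (4TB). For such
 * an mm, mm_p4d_folded() and mm_pud_folded() are true while
 * mm_pmd_folded() is false; the upper levels only unfold when the
 * asce_limit is upgraded.
 */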

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.protected_count)))
		return 1;
#endif
	return 0;
}

static inline pgste_t clear_pgste_bit(pgste_t pgste, unsigned long mask)
{
	return __pgste(pgste_val(pgste) & ~mask);
}

static inline pgste_t set_pgste_bit(pgste_t pgste, unsigned long mask)
{
	return __pgste(pgste_val(pgste) | mask);
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) | pgprot_val(prot));
}

/*
 * As soon as the guest uses storage keys or enables PV, we deduplicate all
 * mapped shared zeropages and prevent new shared zeropages from getting
 * mapped.
 */
#define mm_forbids_zeropage mm_forbids_zeropage
static inline int mm_forbids_zeropage(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (!mm->context.allow_cow_sharing)
		return 1;
#endif
	return 0;
}

static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

/**
 * cspg() - Compare and Swap and Purge (CSPG)
 * @ptr: Pointer to the value to be exchanged
 * @old: The expected old value
 * @new: The new value
 *
 * Return: True if compare and swap was successful, otherwise false.
 */
static inline bool cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	cspg	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
	return old == r1.even;
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

/**
 * crdte() - Compare and Replace DAT Table Entry
 * @old:     The expected old value
 * @new:     The new value
 * @table:   Pointer to the value to be exchanged
 * @dtt:     Table type of the table to be exchanged
 * @address: The address mapped by the entry to be replaced
 * @asce:    The ASCE of this entry
 *
 * Return: True if compare and replace was successful, otherwise false.
 */
static inline bool crdte(unsigned long old, unsigned long new,
			 unsigned long *table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
	return old == r1.even;
}
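
/*
 * Sketch of a hypothetical crdte() caller that replaces one segment table
 * entry and flushes the TLB for the mapped address, assuming that table
 * points into a segment table reachable via asce:
 *
 *	old = READ_ONCE(*table);
 *	while (!crdte(old, new, table, CRDTE_DTT_SEGMENT, addr, asce))
 *		old = READ_ONCE(*table);
 */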

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION3_ENTRY_PRESENT) != 0;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_present(pud) && (pud_val(pud) & _REGION3_ENTRY_LARGE) != 0);
}

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_PRESENT) != 0;
}

#define pmd_leaf pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return (pmd_present(pmd) && (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_leaf(pmd) implies pmd_present(pmd) */
	return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define pmd_swp_soft_dirty(pmd)		pmd_soft_dirty(pmd)
#define pmd_swp_mksoft_dirty(pmd)	pmd_mksoft_dirty(pmd)
#define pmd_swp_clear_soft_dirty(pmd)	pmd_clear_soft_dirty(pmd)
#endif

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, __pte(_PAGE_INVALID));
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
	pte = set_pte_bit(pte, newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
	if (pte_val(pte) & _PAGE_DIRTY)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
	if (pte_val(pte) & _PAGE_WRITE)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	if (pte_val(pte) & _PAGE_READ)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local)
{
	unsigned long pto;

	pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
	asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]"
		     : "+m" (*ptep)
		     : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
		       [m4] "i" (local));
}

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = __pa(ptep);

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	ipte	%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	ipte	%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = __pa(ptep);

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	ipte	%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure_pte(res);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure_pte(res);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		set_pte(ptep, __pte(_PAGE_INVALID));
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* Nothing to do */
	if (!mm_is_protected(mm) || !pte_present(res))
		return res;
	/*
	 * At this point the reference through the mapping is still present.
	 * The notifier should have destroyed all protected vCPUs at this
	 * point, so the destroy should be successful.
	 */
	if (full && !uv_destroy_pte(res))
		return res;
	/*
	 * If something went wrong and the page could not be destroyed, or
	 * if this is not a mm teardown, the slower export is used as
	 * fallback instead.
	 */
	uv_convert_from_secure_pte(res);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

/*
 * Check if PTEs only differ in _PAGE_PROTECT HW bit, but also allow SW PTE
 * bits in the comparison. Those might change e.g. because of dirty and young
 * tracking.
 */
static inline int pte_allow_rdp(pte_t old, pte_t new)
{
	/*
	 * Only allow changes from RO to RW
	 */
	if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT)
		return 0;

	return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK);
}

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
	/*
	 * RDP might not have propagated the PTE protection reset to all CPUs,
	 * so there could be spurious TLB protection faults.
	 * NOTE: This will also be called when a racing pagetable update on
	 * another thread already installed the correct PTE. Both cases cannot
	 * really be distinguished.
	 * Therefore, only do the local TLB flush when RDP can be used, and the
	 * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
	 * A local RDP can be used to do the flush.
	 */
	if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT))
		__ptep_rdp(address, ptep, 1);
}
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault

void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
			 pte_t new);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	if (cpu_has_rdp() && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry))
		ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
	else
		ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define PFN_PTE_SHIFT		PAGE_SHIFT

/*
 * Set multiple PTEs to consecutive pages with a single call. All PTEs
 * are within the same folio, PMD and VMA.
 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t entry, unsigned int nr)
{
	if (pte_present(entry))
		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
	if (mm_has_pgste(mm)) {
		for (;;) {
			ptep_set_pte_at(mm, addr, ptep, entry);
			if (--nr == 0)
				break;
			ptep++;
			entry = __pte(pte_val(entry) + PAGE_SIZE);
			addr += PAGE_SIZE;
		}
	} else {
		for (;;) {
			set_pte(ptep, entry);
			if (--nr == 0)
				break;
			ptep++;
			entry = __pte(pte_val(entry) + PAGE_SIZE);
		}
	}
}
#define set_ptes set_ptes
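
/*
 * Usage sketch (hypothetical caller): with ptep pointing at the pte of
 * the first page of a four page folio, a single call
 *
 *	set_ptes(vma->vm_mm, addr, ptep, entry, 4);
 *
 * installs four ptes, advancing the encoded physical address by
 * PAGE_SIZE for each entry.
 */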

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	__pte = __pte(physpage | pgprot_val(pgprot));
	return pte_mkyoung(__pte);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_leaf(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_leaf(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}
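
/*
 * Worked example for the shift calculation above: for a four level page
 * table the first entry has the region-first type, so
 * (rste & _REGION_ENTRY_TYPE_MASK) == 0x0c and the shift becomes
 * (0x0c >> 2) * 11 + 20 = 53 == _REGION1_SHIFT. Region-second (0x08)
 * yields 42, region-third (0x04) yields 31, and a segment table entry
 * (0x00) yields 20 == _SEGMENT_SHIFT.
 */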
1516
1517#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
1518
1519static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1520{
1521 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1522 return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1523 return (p4d_t *) pgdp;
1524}
1525#define p4d_offset_lockless p4d_offset_lockless
1526
1527static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1528{
1529 return p4d_offset_lockless(pgdp, *pgdp, address);
1530}
1531
1532static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1533{
1534 if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1535 return (pud_t *) p4d_deref(p4d) + pud_index(address);
1536 return (pud_t *) p4dp;
1537}
1538#define pud_offset_lockless pud_offset_lockless
1539
1540static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1541{
1542 return pud_offset_lockless(p4dp, *p4dp, address);
1543}
1544#define pud_offset pud_offset
1545
1546static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1547{
1548 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1549 return (pmd_t *) pud_deref(pud) + pmd_index(address);
1550 return (pmd_t *) pudp;
1551}
1552#define pmd_offset_lockless pmd_offset_lockless
1553
1554static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1555{
1556 return pmd_offset_lockless(pudp, *pudp, address);
1557}
1558#define pmd_offset pmd_offset
1559
1560static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1561{
1562 return (unsigned long) pmd_deref(pmd);
1563}
1564
1565static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1566{
1567 return end <= current->mm->context.asce_limit;
1568}
1569#define gup_fast_permitted gup_fast_permitted
1570
1571#define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
1572#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1573#define pte_page(x) pfn_to_page(pte_pfn(x))
1574
1575#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1576#define pud_page(pud) pfn_to_page(pud_pfn(pud))
1577#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1578#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1579
1580static inline pmd_t pmd_wrprotect(pmd_t pmd)
1581{
1582 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1583 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1584}
1585
1586static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
1587{
1588 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1589 if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1590 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1591 return pmd;
1592}
1593
1594static inline pmd_t pmd_mkclean(pmd_t pmd)
1595{
1596 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
1597 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1598}
1599
1600static inline pmd_t pmd_mkdirty(pmd_t pmd)
1601{
1602 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
1603 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1604 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1605 return pmd;
1606}
1607
1608static inline pud_t pud_wrprotect(pud_t pud)
1609{
1610 pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1611 return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1612}
1613
1614static inline pud_t pud_mkwrite(pud_t pud)
1615{
1616 pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1617 if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1618 pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1619 return pud;
1620}
1621
1622static inline pud_t pud_mkclean(pud_t pud)
1623{
1624 pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
1625 return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1626}
1627
1628static inline pud_t pud_mkdirty(pud_t pud)
1629{
1630 pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
1631 if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1632 pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1633 return pud;
1634}
1635
1636#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1637static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1638{
1639 /*
1640 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1641 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1642 */
1643 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1644 return pgprot_val(SEGMENT_NONE);
1645 if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1646 return pgprot_val(SEGMENT_RO);
1647 if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1648 return pgprot_val(SEGMENT_RX);
1649 if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1650 return pgprot_val(SEGMENT_RW);
1651 return pgprot_val(SEGMENT_RWX);
1652}
1653
1654static inline pmd_t pmd_mkyoung(pmd_t pmd)
1655{
1656 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1657 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1658 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1659 return pmd;
1660}
1661
1662static inline pmd_t pmd_mkold(pmd_t pmd)
1663{
1664 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1665 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1666}
1667
1668static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1669{
1670 unsigned long mask;
1671
1672 mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
1673 mask |= _SEGMENT_ENTRY_DIRTY;
1674 mask |= _SEGMENT_ENTRY_YOUNG;
1675 mask |= _SEGMENT_ENTRY_LARGE;
1676 mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
1677 pmd = __pmd(pmd_val(pmd) & mask);
1678 pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
1679 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1680 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1681 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1682 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1683 return pmd;
1684}
1685
1686static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1687{
1688 return __pmd(physpage + massage_pgprot_pmd(pgprot));
1689}
1690
1691#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1692
1693static inline void __pmdp_csp(pmd_t *pmdp)
1694{
1695 csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
1696 pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1697}
1698
1699#define IDTE_GLOBAL 0
1700#define IDTE_LOCAL 1
1701
1702#define IDTE_PTOA 0x0800
1703#define IDTE_NODAT 0x1000
1704#define IDTE_GUEST_ASCE 0x2000
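
/*
 * IDTE_GLOBAL and IDTE_LOCAL feed the m4 field of the IDTE instruction
 * and select whether TLB entries are cleared on all CPUs or only on the
 * local one; the other three values are option bits or'ed into the r2
 * operand, see the inline assemblies in __pmdp_idte() and __pudp_idte()
 * below.
 */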
1705
1706static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
1707 unsigned long opt, unsigned long asce,
1708 int local)
1709{
1710 unsigned long sto;
1711
1712 sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
1713 if (__builtin_constant_p(opt) && opt == 0) {
1714 /* flush without guest asce */
1715 asm volatile(
1716 " idte %[r1],0,%[r2],%[m4]"
1717 : "+m" (*pmdp)
1718 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
1719 [m4] "i" (local)
1720 : "cc" );
1721 } else {
1722 /* flush with guest asce */
1723 asm volatile(
1724 " idte %[r1],%[r3],%[r2],%[m4]"
1725 : "+m" (*pmdp)
1726 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
1727 [r3] "a" (asce), [m4] "i" (local)
1728 : "cc" );
1729 }
1730}
1731
1732static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
1733 unsigned long opt, unsigned long asce,
1734 int local)
1735{
1736 unsigned long r3o;
1737
1738 r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
1739 r3o |= _ASCE_TYPE_REGION3;
1740 if (__builtin_constant_p(opt) && opt == 0) {
1741 /* flush without guest asce */
1742 asm volatile(
1743 " idte %[r1],0,%[r2],%[m4]"
1744 : "+m" (*pudp)
1745 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
1746 [m4] "i" (local)
1747 : "cc");
1748 } else {
1749 /* flush with guest asce */
1750 asm volatile(
1751 " idte %[r1],%[r3],%[r2],%[m4]"
1752 : "+m" (*pudp)
1753 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
1754 [r3] "a" (asce), [m4] "i" (local)
1755 : "cc" );
1756 }
1757}
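
/*
 * Hedged usage sketch for the two IDTE wrappers above; the real callers
 * live in arch/s390/mm/pgtable.c. A global flush without a guest ASCE
 * would roughly look like
 *
 *	__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
 *
 * while machines with TLB guest support pass IDTE_NODAT | IDTE_GUEST_ASCE
 * together with the ASCE of the mm.
 */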
1758
1759pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1760pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1761pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
1762
1763#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1764
1765#define __HAVE_ARCH_PGTABLE_DEPOSIT
1766void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1767 pgtable_t pgtable);
1768
1769#define __HAVE_ARCH_PGTABLE_WITHDRAW
1770pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1771
1772#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1773static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
1774 unsigned long addr, pmd_t *pmdp,
1775 pmd_t entry, int dirty)
1776{
1777 VM_BUG_ON(addr & ~HPAGE_MASK);
1778
1779 entry = pmd_mkyoung(entry);
1780 if (dirty)
1781 entry = pmd_mkdirty(entry);
1782 if (pmd_val(*pmdp) == pmd_val(entry))
1783 return 0;
1784 pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
1785 return 1;
1786}
1787
1788#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1789static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1790 unsigned long addr, pmd_t *pmdp)
1791{
1792 pmd_t pmd = *pmdp;
1793
1794 pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
1795 return pmd_young(pmd);
1796}
1797
1798#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1799static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
1800 unsigned long addr, pmd_t *pmdp)
1801{
1802 VM_BUG_ON(addr & ~HPAGE_MASK);
1803 return pmdp_test_and_clear_young(vma, addr, pmdp);
1804}
1805
1806static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1807 pmd_t *pmdp, pmd_t entry)
1808{
1809 set_pmd(pmdp, entry);
1810}
1811
1812static inline pmd_t pmd_mkhuge(pmd_t pmd)
1813{
1814 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
1815 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1816 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1817}
1818
1819#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1820static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1821 unsigned long addr, pmd_t *pmdp)
1822{
1823 return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1824}
1825
1826#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
1827static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
1828 unsigned long addr,
1829 pmd_t *pmdp, int full)
1830{
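	/*
	 * full == 1 means the whole address space is being torn down, so
	 * a plain store is sufficient; otherwise exchange the entry via
	 * pmdp_xchg_lazy(), which may defer the TLB flush.
	 */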
1831 if (full) {
1832 pmd_t pmd = *pmdp;
1833 set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1834 return pmd;
1835 }
1836 return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1837}
1838
1839#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
1840static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
1841 unsigned long addr, pmd_t *pmdp)
1842{
1843 return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
1844}
1845
1846#define __HAVE_ARCH_PMDP_INVALIDATE
1847static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
1848 unsigned long addr, pmd_t *pmdp)
1849{
1850 pmd_t pmd;
1851
1852 VM_WARN_ON_ONCE(!pmd_present(*pmdp));
1853 pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1854 return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
1855}
1856
1857#define __HAVE_ARCH_PMDP_SET_WRPROTECT
1858static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1859 unsigned long addr, pmd_t *pmdp)
1860{
1861 pmd_t pmd = *pmdp;
1862
1863 if (pmd_write(pmd))
1864 pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
1865}
1866
1867static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
1868 unsigned long address,
1869 pmd_t *pmdp)
1870{
1871 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
1872}
1873#define pmdp_collapse_flush pmdp_collapse_flush
1874
1875#define pfn_pmd(pfn, pgprot) mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
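
/*
 * For instance, pfn_pmd(pfn, PAGE_RW) scales the pfn by PAGE_SHIFT and
 * merges the SEGMENT_RW protection via mk_pmd_phys(); pmd_mkhuge() above
 * would then mark the result large, young and (initially) protected.
 */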
1876
1877static inline int pmd_trans_huge(pmd_t pmd)
1878{
1879 return pmd_leaf(pmd);
1880}
1881
1882#define has_transparent_hugepage has_transparent_hugepage
1883static inline int has_transparent_hugepage(void)
1884{
1885 return cpu_has_edat1() ? 1 : 0;
1886}
1887#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1888
1889/*
1890 * 64 bit swap entry format:
1891 * A page-table entry has some bits we have to treat in a special way.
1892 * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte
1893 * as invalid.
1894 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
1895 * | offset |E11XX|type |S0|
1896 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
1897 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
1898 *
1899 * Bits 0-51 store the offset.
1900 * Bit 52 (E) is used to remember PG_anon_exclusive.
1901 * Bits 57-61 store the type.
1902 * Bit 62 (S) is used for softdirty tracking.
1903 * Bits 55 and 56 (X) are unused.
1904 */
1905
1906#define __SWP_OFFSET_MASK ((1UL << 52) - 1)
1907#define __SWP_OFFSET_SHIFT 12
1908#define __SWP_TYPE_MASK ((1UL << 5) - 1)
1909#define __SWP_TYPE_SHIFT 2
1910
1911static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1912{
1913 unsigned long pteval;
1914
1915 pteval = _PAGE_INVALID | _PAGE_PROTECT;
1916 pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1917 pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
1918 return __pte(pteval);
1919}
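
/*
 * Worked example, with values derived from the bit layout above (bit 53,
 * _PAGE_INVALID, is 0x400 and bit 54, _PAGE_PROTECT, is 0x200):
 * mk_swap_pte(5, 0x1234) builds
 *
 *	0x600 | (0x1234 << 12) | (5 << 2) == 0x1234614
 *
 * which satisfies the swap pattern (pte & 0x201) == 0x200.
 */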
1920
1921static inline unsigned long __swp_type(swp_entry_t entry)
1922{
1923 return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1924}
1925
1926static inline unsigned long __swp_offset(swp_entry_t entry)
1927{
1928 return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1929}
1930
1931static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1932{
1933 return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1934}
1935
1936#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1937#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
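
/*
 * Hypothetical self-check, illustrative only: encoding a (type, offset)
 * pair and decoding it again is lossless as long as the type fits into
 * 5 bits and the offset into 52 bits (the helper name is invented):
 */
static inline bool example_swap_pte_roundtrip(unsigned long type,
					      unsigned long offset)
{
	swp_entry_t entry = __swp_entry(type, offset);

	return __swp_type(entry) == type && __swp_offset(entry) == offset;
}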
1938
1939/*
1940 * 64 bit swap entry format for REGION3 and SEGMENT table entries (RSTE)
1941 * Bits 59 and 63 are used to indicate the swap entry. Bit 58 marks the rste
1942 * as invalid.
1943 * A swap entry is indicated by bit pattern (rste & 0x011) == 0x010
1944 * | offset |Xtype |11TT|S0|
1945 * |0000000000111111111122222222223333333333444444444455|555555|5566|66|
1946 * |0123456789012345678901234567890123456789012345678901|234567|8901|23|
1947 *
1948 * Bits 0-51 store the offset.
1949 * Bits 53-57 store the type.
1950 * Bit 62 (S) is used for softdirty tracking.
1951 * Bits 60-61 (TT) indicate the table type: 0x01 for REGION3 and 0x00 for SEGMENT.
1952 * Bit 52 (X) is unused.
1953 */
1954
1955#define __SWP_OFFSET_MASK_RSTE ((1UL << 52) - 1)
1956#define __SWP_OFFSET_SHIFT_RSTE 12
1957#define __SWP_TYPE_MASK_RSTE ((1UL << 5) - 1)
1958#define __SWP_TYPE_SHIFT_RSTE 6
1959
1960/*
1961 * TT bits set to 0x00 == SEGMENT. For REGION3 entries, caller must add R3
1962 * bits 0x01. See also __set_huge_pte_at().
1963 */
1964static inline unsigned long mk_swap_rste(unsigned long type, unsigned long offset)
1965{
1966 unsigned long rste;
1967
1968 rste = _RST_ENTRY_INVALID | _RST_ENTRY_COMM;
1969 rste |= (offset & __SWP_OFFSET_MASK_RSTE) << __SWP_OFFSET_SHIFT_RSTE;
1970 rste |= (type & __SWP_TYPE_MASK_RSTE) << __SWP_TYPE_SHIFT_RSTE;
1971 return rste;
1972}
1973
1974static inline unsigned long __swp_type_rste(swp_entry_t entry)
1975{
1976 return (entry.val >> __SWP_TYPE_SHIFT_RSTE) & __SWP_TYPE_MASK_RSTE;
1977}
1978
1979static inline unsigned long __swp_offset_rste(swp_entry_t entry)
1980{
1981 return (entry.val >> __SWP_OFFSET_SHIFT_RSTE) & __SWP_OFFSET_MASK_RSTE;
1982}
1983
1984#define __rste_to_swp_entry(rste) ((swp_entry_t) { rste })
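
/*
 * Worked example, derived from the RSTE layout above (bit 58, invalid,
 * is 0x20 and bit 59, common, is 0x10): mk_swap_rste(5, 0x1234) builds
 *
 *	0x30 | (0x1234 << 12) | (5 << 6) == 0x1234170
 *
 * which satisfies the swap pattern (rste & 0x011) == 0x010 and keeps the
 * TT bits at 0x00, i.e. a SEGMENT entry.
 */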
1985
1986/*
1987 * s390 has different layout for PTE and region / segment table entries (RSTE).
1988 * This is also true for swap entries, and their swap type and offset encoding.
1989 * For hugetlbfs PTE_MARKER support, s390 has internal __swp_type_rste() and
1990 * __swp_offset_rste() helpers to correctly handle RSTE swap entries.
1991 *
1992 * But common swap code does not know about this difference, and only uses
1993 * __swp_type(), __swp_offset() and __swp_entry() helpers for conversion between
1994 * arch-dependent and arch-independent representation of swp_entry_t for all
1995 * pagetable levels. On s390, those helpers only work for PTE swap entries.
1996 *
1997 * Therefore, implement __pmd_to_swp_entry() to build a fake PTE swap entry
1998 * and return the arch-dependent representation of that. Correspondingly,
1999 * implement __swp_entry_to_pmd() to convert that into a proper PMD swap
2000 * entry again. With this, the arch-dependent swp_entry_t representation will
2001 * always look like a PTE swap entry in common code.
2002 *
2003 * This is somewhat similar to fake PTEs in hugetlbfs code for s390, but only
2004 * requires conversion of the swap type and offset, and not all the possible
2005 * PTE bits.
2006 */
2007static inline swp_entry_t __pmd_to_swp_entry(pmd_t pmd)
2008{
2009 swp_entry_t arch_entry;
2010 pte_t pte;
2011
2012 arch_entry = __rste_to_swp_entry(pmd_val(pmd));
2013 pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry));
2014 return __pte_to_swp_entry(pte);
2015}
2016
2017static inline pmd_t __swp_entry_to_pmd(swp_entry_t arch_entry)
2018{
2019 pmd_t pmd;
2020
2021 pmd = __pmd(mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry)));
2022 return pmd;
2023}
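
/*
 * Hypothetical illustration of the scheme described above (the helper
 * name is invented): for a PMD swap entry built by __swp_entry_to_pmd(),
 * the round trip through the common representation is lossless, since
 * only swap type and offset are carried across:
 */
static inline bool example_pmd_swp_roundtrip(swp_entry_t entry)
{
	pmd_t pmd = __swp_entry_to_pmd(entry);
	swp_entry_t common = __pmd_to_swp_entry(pmd);

	return pmd_val(__swp_entry_to_pmd(common)) == pmd_val(pmd);
}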
2024
2025extern int vmem_add_mapping(unsigned long start, unsigned long size);
2026extern void vmem_remove_mapping(unsigned long start, unsigned long size);
2027extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
2028extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
2029extern void vmem_unmap_4k_page(unsigned long addr);
2030extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);
2031extern int s390_enable_sie(void);
2032extern int s390_enable_skey(void);
2033extern void s390_reset_cmma(struct mm_struct *mm);
2034
2035/* s390 has a private copy of get_unmapped_area() to deal with cache synonyms */
2036#define HAVE_ARCH_UNMAPPED_AREA
2037#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
2038
2039#define pmd_pgtable(pmd) \
2040 ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
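
/*
 * Note: sizeof(pte_t) * PTRS_PER_PTE is 2KB (256 entries of 8 bytes), so
 * negating the product yields a mask that strips the low 11 bits and
 * leaves the 2KB-aligned page table origin stored in the pmd.
 */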
2041
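/*
 * Reassemble a 64-bit value from the four 16-bit fragments kept in the
 * PGSTE_ST2 software field of the first four PGSTEs behind the page
 * table at pgt; the fragments are stored most-significant first.
 */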
2042static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)
2043{
2044 unsigned long *pgstes, res;
2045
2046 pgstes = pgt + _PAGE_ENTRIES;
2047
2048 res = (pgstes[0] & PGSTE_ST2_MASK) << 16;
2049 res |= pgstes[1] & PGSTE_ST2_MASK;
2050 res |= (pgstes[2] & PGSTE_ST2_MASK) >> 16;
2051 res |= (pgstes[3] & PGSTE_ST2_MASK) >> 32;
2052
2053 return res;
2054}
2055
2056static inline pgste_t pgste_get_lock(pte_t *ptep)
2057{
2058 unsigned long value = 0;
2059#ifdef CONFIG_PGSTE
2060 unsigned long *ptr = (unsigned long *)(ptep + PTRS_PER_PTE);
2061
2062 do {
2063 value = __atomic64_or_barrier(PGSTE_PCL_BIT, ptr);
2064 } while (value & PGSTE_PCL_BIT);
2065 value |= PGSTE_PCL_BIT;
2066#endif
2067 return __pgste(value);
2068}
2069
2070static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
2071{
2072#ifdef CONFIG_PGSTE
2073 barrier();
2074 WRITE_ONCE(*(unsigned long *)(ptep + PTRS_PER_PTE), pgste_val(pgste) & ~PGSTE_PCL_BIT);
2075#endif
2076}
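
/*
 * Illustration with a hypothetical caller: pgste_get_lock() spins on the
 * PGSTE_PCL_BIT of the pgste shadowing the pte, so updates of pte and
 * pgste are bracketed like
 *
 *	pgste_t pgste = pgste_get_lock(ptep);
 *	... modify *ptep and pgste ...
 *	pgste_set_unlock(ptep, pgste);
 */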
2077
2078#endif /* _ASM_S390_PGTABLE_H */