/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *		 Ulrich Weigand (weigand@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
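
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * arch_report_meminfo() is expected to read the direct_pages_count[] array
 * that update_page_count() maintains. A minimal implementation could look
 * like this; the exact /proc/meminfo label is an assumption here.
 *
 *	void arch_report_meminfo(struct seq_file *m)
 *	{
 *		seq_printf(m, "DirectMap4k:    %8lu kB\n",
 *			   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
 *	}
 */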

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

#define VMEM_MAX_PHYS		((unsigned long) vmemmap)

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
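
/*
 * Usage sketch (editor's illustration, not from the original source):
 *
 *	if (is_module_addr((void *)regs->psw.addr))
 *		pr_debug("PSW points into the module area\n");
 *
 * regs->psw.addr as a sample address is an assumption; any kernel
 * virtual address can be tested this way.
 */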

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	    P-table origin			      |	     TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * C Common-Segment Bit:  Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	    S-table origin			     |	 TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |	  region table origin			     |	     DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
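
/*
 * Worked example (editor's addition): with the hardware bit definitions
 * below, a pte that is invalid and write-protected carries the value
 * _PAGE_INVALID | _PAGE_PROTECT == 0x400 | 0x200 == 0x600. This is
 * exactly the base pattern mk_swap_pte() starts from further down.
 */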

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
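
/*
 * Quick sanity check (editor's illustration): plugging the patterns above
 * into the predicates defined later in this file,
 *
 *	pte_none(__pte(0x400))            -> 1  (_PAGE_INVALID only)
 *	pte_swap(__pte(0x600 | (3 << 2))) -> 1  ((pte & 0x201) == 0x200)
 *	pte_present(__pte(0x001))         -> 1  (_PAGE_PRESENT set)
 */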

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	*/
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL	/* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL	/* page table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200		/* segment protection bit   */
#define _SEGMENT_ENTRY_NOEXEC	0x100		/* segment no-execute bit   */
#define _SEGMENT_ENTRY_INVALID	0x20		/* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c		/* segment table type mask  */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES
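
/*
 * Worked example (editor's addition): with PAGE_SHIFT == 12 the shifts
 * above slice a 64 bit virtual address into 11 bit table indexes plus a
 * 12 bit byte offset:
 *
 *	PGDIR index: bits 53-63 (2048 entries)
 *	P4D   index: bits 42-52 (2048 entries)
 *	PUD   index: bits 31-41 (2048 entries)
 *	PMD   index: bits 20-30 (2048 entries)
 *	PTE   index: bits 12-19 ( 256 entries)
 *
 * e.g. (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1) extracts the segment
 * table index, matching the pmd_index() macro further down.
 */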

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
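
/*
 * Worked example (editor's addition): mapping one row of the table above
 * onto the bit definitions, "read-only, clean, old" (00..1...1...01) is
 * _SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_READ
 * == 0x200 | 0x20 | 0x1 == 0x221 for a segment table entry.
 */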

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
					/*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
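
/*
 * Editor's note with an example: the __Pxxx (private) rows deliberately
 * drop the write bit, so a PROT_WRITE request on a MAP_PRIVATE mapping
 * starts out as PAGE_RO and the first write fault triggers copy-on-write;
 * the __Sxxx (shared) rows keep it, e.g. __S011 == PAGE_RW.
 */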

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) | pgprot_val(prot));
}
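
/*
 * Usage sketch (editor's illustration): these helpers are the building
 * blocks for the pte/pmd/pud modifiers below, e.g. pte_wrprotect() is
 * essentially
 *
 *	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
 *	pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 *
 * keeping the software write bit and the hardware protect bit in sync.
 */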

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	cspg	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long *table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, __pte(_PAGE_INVALID));
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
	pte = set_pte_bit(pte, newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}
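
/*
 * Usage sketch (editor's illustration): generic mm code applies new
 * protections to an existing mapping with this pattern; vma->vm_page_prot
 * here is just a representative pgprot source.
 *
 *	pte_t pte = *ptep;
 *	pte = pte_modify(pte, vma->vm_page_prot);
 *
 * The dirty/young state is preserved via _PAGE_CHG_MASK while the
 * permission bits are replaced.
 */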

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
	if (pte_val(pte) & _PAGE_DIRTY)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
	if (pte_val(pte) & _PAGE_WRITE)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	if (pte_val(pte) & _PAGE_READ)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = __pa(ptep);

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	ipte	%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	ipte	%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = __pa(ptep);

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	ipte	%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
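
/*
 * Sketch of the sequence described above (editor's illustration of the
 * generic change_pte_range() pattern, simplified):
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	// flushes TLB here
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *	flush_tlb_range(vma, start, end);		// nop on s390
 */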

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		set_pte(ptep, __pte(_PAGE_INVALID));
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		set_pte(ptep, entry);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	__pte = __pte(physpage | pgprot_val(pgprot));
	if (!MACHINE_HAS_NX)
		__pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd  = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}
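
/*
 * Worked example (editor's addition) for the shift computation above:
 * the table type field is 0x0c/0x08/0x04/0x00 for region 1/2/3/segment
 * tables, so (type >> 2) * 11 + 20 yields
 *
 *	region 1: (0x0c >> 2) * 11 + 20 = 53 == _REGION1_SHIFT
 *	region 2: (0x08 >> 2) * 11 + 20 = 42 == _REGION2_SHIFT
 *	region 3: (0x04 >> 2) * 11 + 20 = 31 == _REGION3_SHIFT
 *	segment : (0x00 >> 2) * 11 + 20 = 20 == _SEGMENT_SHIFT
 */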

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
	return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long mask;

	mask  = _SEGMENT_ENTRY_ORIGIN_LARGE;
	mask |= _SEGMENT_ENTRY_DIRTY;
	mask |= _SEGMENT_ENTRY_YOUNG;
	mask |= _SEGMENT_ENTRY_LARGE;
	mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd = __pmd(pmd_val(pmd) & mask);
	pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pmd(physpage + massage_pgprot_pmd(pgprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	set_pmd(pmdp, entry);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	unsigned long pteval;

	pteval = _PAGE_INVALID | _PAGE_PROTECT;
	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return __pte(pteval);
}
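
/*
 * Worked example (editor's addition): mk_swap_pte(3, 0x1234) yields
 *
 *	0x400 | 0x200		(_PAGE_INVALID | _PAGE_PROTECT)
 *	| (0x1234 << 12)	(offset)
 *	| (3 << 2)		(type)
 *	= 0x123460c
 *
 * and __swp_type()/__swp_offset() below recover 3 and 0x1234 from it.
 */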

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))

#endif /* _ASM_S390_PGTABLE_H */