/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
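
/*
 * Editor's usage sketch: the direct-mapping code is expected to bump
 * these counters when it maps or splits a large page, e.g. after
 * mapping one 1 MB segment:
 *
 *	update_page_count(PG_DIRECT_MAP_1M, 1);
 *
 * and after splitting that segment into 4 KB pages:
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 */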

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter-module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
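
/*
 * Editor's illustrative sketch (hypothetical helpers, not part of the
 * original API): the three predicates quoted above reduce to these bit
 * tests. The real pte_none()/pte_swap()/pte_present() further down in
 * this file are equivalent.
 */
static inline int __sketch_pte_none(pte_t pte)
{
	/* empty: .10.00000000, i.e. pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int __sketch_pte_swap(pte_t pte)
{
	/* swap: .11..ttttt.0, i.e. (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) ==
		_PAGE_PROTECT;
}

static inline int __sketch_pte_present(pte_t pte)
{
	/* present: .xx.xxxxxx.1, i.e. (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}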

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	    */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	 /* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	 /* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	 /* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	 /* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
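
/*
 * Editor's sketch (hypothetical helper, not part of the original API):
 * decoding the software dirty/young state of a large segment table
 * entry according to the table above. Returns (dirty << 1) | young.
 */
static inline unsigned int __sketch_segment_state(pmd_t pmd)
{
	unsigned int dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	unsigned int young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;

	return (dirty << 1) | young;
}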

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
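
/*
 * Editor's sketch: a user ASCE is expected to combine the top-level table
 * origin with these bits, roughly as the s390 page table allocation code
 * does for a three-level (region-third) table:
 *
 *	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 *			   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
 */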

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
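
/*
 * Editor's sketch: generic mm code is expected to select one of the
 * __Pxxx/__Sxxx values via protection_map[], roughly:
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *
 * so a private write-only mapping (__P010) starts out as PAGE_RO and
 * only becomes writable through copy-on-write.
 */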

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				   _SEGMENT_ENTRY_LARGE | \
				   _SEGMENT_ENTRY_READ | \
				   _SEGMENT_ENTRY_YOUNG | \
				   _SEGMENT_ENTRY_PROTECT | \
				   _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				     _SEGMENT_ENTRY_LARGE | \
				     _SEGMENT_ENTRY_READ | \
				     _SEGMENT_ENTRY_WRITE | \
				     _SEGMENT_ENTRY_YOUNG | \
				     _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
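
/*
 * Editor's usage sketch (assuming a machine with the CRDTE facility):
 * exchange a segment table entry and flush the TLB in one go, e.g.
 *
 *	crdte(pmd_val(old), pmd_val(new), sto, CRDTE_DTT_SEGMENT,
 *	      addr, mm->context.asce);
 *
 * where sto is the origin of the segment table containing the entry
 * (cf. how __pmdp_idte() below derives it from the pmd pointer).
 */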

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
		return 1;
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Behaviour is undefined otherwise.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Behaviour is undefined otherwise.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

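/*
 * Editor's sketch of the generic sequence described above:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	(flushes the TLB)
 *	set_pte_at(mm, addr, ptep, newpte);
 *	flush_tlb_range(vma, start, end);		(a nop on s390)
 */
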
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}
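
/*
 * Editor's sketch: the generic zap path is expected to pass the
 * mmu_gather's fullmm flag as "full", e.g.
 *
 *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 *
 * so that a full address space teardown takes the cheap, flush-free
 * branch above.
 */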

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
	return (p4d_t *) pgd;
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
	return (pud_t *) p4d;
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
	return (pmd_t *) pud;
}

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
	return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
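
/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original API): looking up the pte for a user address with the offset
 * functions above. Returns NULL if no pte-mapped page is present; large
 * pud/pmd mappings are treated as "no pte" here for simplicity.
 */
static inline pte_t *__sketch_pte_lookup(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	if (pgd_none(*pgdp))
		return NULL;
	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(*p4dp))
		return NULL;
	pudp = pud_offset(p4dp, addr);
	if (pud_none(*pudp) || pud_large(*pudp))
		return NULL;
	pmdp = pmd_offset(pudp, addr);
	if (pmd_none(*pmdp) || pmd_large(*pmdp))
		return NULL;
	return pte_offset(pmdp, addr);
}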

static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
{
	unsigned long len, end;

	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		return false;
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
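
/*
 * Editor's round-trip sketch: encoding and decoding a swap entry with
 * the helpers above.
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);
 *
 * pte_swap(pte) is true for such a pte, and __swp_type()/__swp_offset()
 * recover type and offset from __pte_to_swp_entry(pte).
 */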

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */