/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern void vmem_map_init(void);
pmd_t *vmem_pmd_alloc(void);
pte_t *vmem_pte_alloc(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
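
/*
 * Usage sketch (illustrative, not a call site from this header): code
 * that establishes a 1 MB segment in the kernel direct mapping would
 * account for it with
 *
 *	update_page_count(PG_DIRECT_MAP_1M, 1);
 *
 * and pass a negative count when the mapping is split or removed.
 */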

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

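/*
 * Worked out, these shifts give the mapping granularity of each level:
 * PMD_SIZE = 1UL << 20 = 1 MB (one segment table entry), PUD_SIZE =
 * 1UL << 31 = 2 GB (one region third table entry), and PGDIR_SIZE =
 * 1UL << 42 = 4 TB (one region second table entry).
 */
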
/*
 * Entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For s390, segment-table entries are combined into one PGD,
 * which leads to 1024 PTEs per PGD.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter-module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	P-table origin			      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	  S-table origin		 |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |	  region table origin			      |	DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

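/*
 * Worked example, reading the table above: a pte whose low 12 bits are
 * 0x400 is empty (only _PAGE_INVALID set); 0x601 (_PAGE_INVALID |
 * _PAGE_PROTECT | _PAGE_PRESENT) is a present prot-none, clean, old
 * page; a swap pte keeps _PAGE_PROTECT set while _PAGE_PRESENT is
 * clear, which is why (pte & 0x201) == 0x200 identifies it.
 */
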
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100		/* private space control */
#define _ASCE_ALT_EVENT		0x80		/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40		/* space switch event */
#define _ASCE_REAL_SPACE	0x20		/* real space control */
#define _ASCE_TYPE_MASK		0x0c		/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c		/* region first table type */
#define _ASCE_TYPE_REGION2	0x08		/* region second table type */
#define _ASCE_TYPE_REGION3	0x04		/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00		/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03		/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL	/* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200		/* region protection bit */
#define _REGION_ENTRY_OFFSET	0xc0		/* region table offset */
#define _REGION_ENTRY_INVALID	0x20		/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c		/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c		/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08		/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04		/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03		/* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_ORIGIN	~0x7ffUL	/* region third table origin */

#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff227UL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL	/* segment table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200		/* page protection bit */
#define _SEGMENT_ENTRY_INVALID	0x20		/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

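/*
 * Every writable request in the private __Pxxx set maps to PAGE_READ:
 * private mappings must start out read-only so that the first write
 * faults and is satisfied by copy-on-write. Only the shared __Sxxx set
 * hands out PAGE_WRITE directly.
 */
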
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

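/*
 * Illustrative sketch (hypothetical caller, all names made up): crdte
 * compares and replaces a DAT table entry and purges the TLB in one
 * operation, so swapping a segment table entry could look like
 *
 *	crdte(old_entry, new_entry, (unsigned long) table,
 *	      CRDTE_DTT_SEGMENT, vmaddr, asce);
 *
 * where "table" is the segment table origin and "asce" the ASCE of the
 * address space the entry belongs to.
 */
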
/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION3_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

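/*
 * Usage sketch (mirrors what generic code like mprotect's
 * change_pte_range does; not a verbatim call site): downgrading a pte
 * to read-only preserves the dirty/young/special state because those
 * bits are part of _PAGE_CHG_MASK:
 *
 *	pte = pte_modify(pte, PAGE_READ);
 */
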
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + TLB flush for the pte */
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
		: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
		  [m4] "i" (local));
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

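/*
 * The generic sequence referred to above looks roughly like this
 * (illustrative sketch of common code, not an s390 call site):
 *
 *	old = ptep_get_and_clear(mm, addr, ptep);	(flushes on s390)
 *	set_pte_at(mm, addr, ptep, new);
 *	flush_tlb_range(vma, start, end);		(nop on s390)
 */
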
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

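/*
 * Illustrative: during address space teardown, generic code such as
 * zap_pte_range runs under tlb_gather_mmu and passes the full-mm flag
 * through, so the cheap path above is taken (sketch, not a verbatim
 * call site):
 *
 *	old = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 */
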
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

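/*
 * Illustrative use (hypothetical caller): mapping "page" read-only
 * builds the pte as
 *
 *	pte_t pte = mk_pte(page, PAGE_READ);
 *
 * mk_pte_phys starts the pte out young; mk_pte additionally marks it
 * dirty when the struct page is already dirty and the protection
 * allows writing.
 */
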
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

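/*
 * Putting the lookup helpers together: a software walk from an mm and
 * a virtual address down to the pte. Illustrative sketch only; real
 * callers also check pgd_none()/pud_none()/pmd_none() and take the
 * page table lock as needed.
 */
#if 0	/* usage sketch, not compiled */
static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}
#endif
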
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pmdp)
		: [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
		  [m4] "i" (local)
		: "cc" );
}

static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pudp)
		: [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
		  [m4] "i" (local)
		: "cc");
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_INVALID);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_INVALID));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

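/*
 * Round-trip example (illustrative): encoding offset 0x1234 on swap
 * type 3 and decoding it again:
 *
 *	swp_entry_t entry = __swp_entry(3, 0x1234);
 *
 * Afterwards __swp_type(entry) == 3, __swp_offset(entry) == 0x1234,
 * and the pte built by __swp_entry_to_pte(entry) satisfies the swap
 * pattern (pte & 0x201) == 0x200 from the comment above.
 */
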
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get_unmapped_area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */