/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
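/*
 * Illustrative sketch, not part of the original header: how the colored
 * zero page lookup above behaves. zero_page_mask selects one of several
 * consecutive zero pages based on the low bits of the faulting address,
 * spreading read-only zero mappings across cache colors. The helper name
 * is hypothetical.
 */
static inline struct page *example_zero_page_for(unsigned long vaddr)
{
	/* Same computation as the ZERO_PAGE() macro above */
	return virt_to_page((void *)(empty_zero_page +
				     (vaddr & zero_page_mask)));
}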
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |                     PFRA                        |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
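/*
 * Illustrative sketch, not in the original header: decode the software
 * and hardware pte bits defined above into a short flag string, e.g. for
 * debugging. The helper name and flag letters are hypothetical.
 */
static inline void example_decode_pte_bits(pte_t pte, char buf[8])
{
	buf[0] = (pte_val(pte) & _PAGE_PRESENT) ? 'p' : '-';
	buf[1] = (pte_val(pte) & _PAGE_READ)    ? 'r' : '-';
	buf[2] = (pte_val(pte) & _PAGE_WRITE)   ? 'w' : '-';
	buf[3] = (pte_val(pte) & _PAGE_DIRTY)   ? 'd' : '-';
	buf[4] = (pte_val(pte) & _PAGE_YOUNG)   ? 'y' : '-';
	buf[5] = (pte_val(pte) & _PAGE_INVALID) ? 'I' : '-';
	buf[6] = (pte_val(pte) & _PAGE_PROTECT) ? 'R' : '-';
	buf[7] = '\0';
}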
/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */
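/*
 * Illustrative sketch, not in the original header: derive the number of
 * translation table levels implied by an ASCE, using the _ASCE_TYPE_*
 * values above. The helper name is hypothetical.
 */
static inline int example_asce_table_levels(unsigned long asce)
{
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		return 5;	/* region-first table down to page table */
	case _ASCE_TYPE_REGION2:
		return 4;
	case _ASCE_TYPE_REGION3:
		return 3;
	default:		/* _ASCE_TYPE_SEGMENT */
		return 2;	/* segment table + page table */
	}
}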
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
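/*
 * Illustrative sketch, not in the original header: per the encoding table
 * above, a clean read-write segment entry keeps the HW protect bit set so
 * that the first store still faults and lets the fault handler switch the
 * entry to the dirty encoding. The helper name is hypothetical.
 */
static inline int example_segment_needs_dirty_fault(pmd_t pmd)
{
	unsigned long bits = pmd_val(pmd) & (_SEGMENT_ENTRY_WRITE |
					     _SEGMENT_ENTRY_DIRTY |
					     _SEGMENT_ENTRY_PROTECT);

	return bits == (_SEGMENT_ENTRY_WRITE | _SEGMENT_ENTRY_PROTECT);
}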
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
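/*
 * Illustrative sketch, assumptions: a 3-level (region-third) address space
 * whose top-level table sits at mm->pgd. This mirrors how an ASCE is
 * composed elsewhere in arch code from the _ASCE_* bits above; the helper
 * name is hypothetical.
 */
static inline unsigned long example_user_asce(struct mm_struct *mm)
{
	return __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_TYPE_REGION3 |
	       _ASCE_USER_BITS;
}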
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}
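/*
 * Illustrative sketch, not in the original header: the fold checks above
 * depend only on the asce_limit of the mm, so the number of table levels
 * a walk will traverse can be derived directly. The helper name is
 * hypothetical.
 */
static inline int example_mm_table_levels(struct mm_struct *mm)
{
	if (!mm_p4d_folded(mm))
		return 5;	/* region-first table down to page table */
	if (!mm_pud_folded(mm))
		return 4;
	if (!mm_pmd_folded(mm))
		return 3;
	return 2;		/* segment table + page table */
}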
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long) ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long) ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
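/*
 * Illustrative sketch, assumptions: the machine has the
 * compare-and-replace-DAT-table-entry facility, and the segment table is
 * aligned to _CRST_TABLE_SIZE so the table origin can be derived by
 * masking the entry address. The helper name is hypothetical; real
 * callers compute the origin from the segment index instead.
 */
static inline void example_crdte_segment(pmd_t *pmdp, unsigned long old,
					 unsigned long new, unsigned long addr,
					 unsigned long asce)
{
	unsigned long sto = (unsigned long) pmdp & ~(_CRST_TABLE_SIZE - 1);

	crdte(old, new, sto, CRDTE_DTT_SEGMENT, addr, asce);
}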
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}
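/*
 * Illustrative sketch, not in the original header: classify a pte with the
 * query functions above; the comments mirror the bit patterns documented
 * in the pte table earlier in this file. The helper name is hypothetical.
 */
static inline const char *example_pte_state(pte_t pte)
{
	if (pte_none(pte))
		return "none";		/* pte == 0x400 */
	if (pte_present(pte))
		return "present";	/* (pte & 0x001) == 0x001 */
	if (pte_swap(pte))
		return "swap";		/* (pte & 0x201) == 0x200 */
	return "other";
}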
/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
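/*
 * Illustrative sketch, not in the original header: the pte_mk*() and
 * pte_wrprotect() helpers above keep the HW _PAGE_PROTECT bit consistent
 * with the SW write/dirty bits, so a clean but writable pte still
 * write-protects the page until the first store faults. The helper name
 * is hypothetical.
 */
static inline pte_t example_clean_writable_pte(pte_t pte)
{
	pte = pte_mkwrite(pte);
	pte = pte_mkclean(pte);	/* re-sets _PAGE_PROTECT */
	return pte;
}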
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}
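/*
 * Illustrative sketch of the common-code sequence described in the comment
 * above (simplified: no page table lock shown, and a plain store stands in
 * for set_pte_at(), which is defined further down in this file). Because
 * the TLB is flushed inside ptep_get_and_clear(), the trailing
 * flush_tlb_range() in generic code is a nop on s390.
 */
static inline void example_change_pte(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pgprot_t newprot)
{
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);	/* flushes the TLB */

	*ptep = pte_modify(pte, newprot);	/* set_pte_at() in real code */
}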
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
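/*
 * Illustrative sketch, not in the original header: build a pte for one
 * kernel page with the conversion helpers above. The helper name and the
 * physical address argument are hypothetical.
 */
static inline pte_t example_kernel_pte(unsigned long phys_addr)
{
	return mk_pte_phys(phys_addr, PAGE_KERNEL);
}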
/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd  = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
	return (p4d_t *) pgd;
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
	return (pud_t *) p4d;
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
	return (pmd_t *) pud;
}

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
	return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)

static inline void pte_unmap(pte_t *pte) { }
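/*
 * Illustrative sketch, not in the original header: the full walk sequence
 * the pgd_offset comment above describes, with folded levels handled by
 * the offset functions themselves. Simplified: no locking, and large
 * (huge) entries simply terminate the walk. The helper name is
 * hypothetical.
 */
static inline pte_t *example_walk_to_pte(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	pud_t *pudp = pud_offset(p4dp, addr);
	pmd_t *pmdp;

	if (pud_none(*pudp) || pud_large(*pudp))
		return NULL;
	pmdp = pmd_offset(pudp, addr);
	if (pmd_none(*pmdp) || pmd_large(*pmdp))
		return NULL;
	return pte_offset(pmdp, addr);
}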
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
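/*
 * Illustrative sketch, not in the original header: invalidate one segment
 * table entry and flush the matching TLB entries on all CPUs using
 * __pmdp_idte() above, without the NODAT or guest-ASCE options. The
 * helper name is hypothetical.
 */
static inline void example_invalidate_pmd(unsigned long addr, pmd_t *pmdp)
{
	__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
}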
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
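/*
 * Illustrative sketch, not in the original header: round-trip a swap entry
 * through the encoding above; the type and offset values are hypothetical.
 */
static inline int example_swp_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	return __swp_type(entry) == 3 && __swp_offset(entry) == 0x1234;
}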
#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */