Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/asm-s390/pgtable.h at c9a28fa7b9ac19b676deefa0a171ce7df8755c08 (949 lines, 30 kB)
/*
 * include/asm-s390/pgtable.h
 *
 * S390 version
 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *            Ulrich Weigand (weigand@de.ibm.com)
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT      22
# define PUD_SHIFT      22
# define PGDIR_SHIFT    22
#else /* __s390x__ */
# define PMD_SHIFT      21
# define PUD_SHIFT      31
# define PGDIR_SHIFT    31
#endif /* __s390x__ */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#ifndef __s390x__
# define PTRS_PER_PTE   1024
# define PTRS_PER_PMD   1
# define PTRS_PER_PUD   1
# define PTRS_PER_PGD   512
#else /* __s390x__ */
# define PTRS_PER_PTE   512
# define PTRS_PER_PMD   1024
# define PTRS_PER_PUD   1
# define PTRS_PER_PGD   2048
#endif /* __s390x__ */

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
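/*
 * Editorial note: a minimal stand-alone sketch, not part of this header,
 * cross-checking the address-space arithmetic implied by the SHIFT and
 * PTRS_PER_* values above. It assumes plain ISO C in user space: on 31 bit,
 * 512 pgd entries of 4 MB cover the whole 2 GB address space; on 64 bit,
 * 2048 pgd entries x 1024 pmd entries x 2 MB segments cover 4 TB.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        /* 31-bit layout: two levels, pmd and pud folded into the pgd */
        unsigned long long pte31 = 1024ULL << 12;   /* one page table maps 4 MB */
        unsigned long long pgd31 = 512ULL * pte31;  /* 512 pgd entries -> 2 GB */

        /* 64-bit layout: three of the five hardware levels */
        unsigned long long pte64 = 512ULL << 12;    /* one page table maps 2 MB */
        unsigned long long pmd64 = 1024ULL * pte64; /* one segment table -> 2 GB */
        unsigned long long pgd64 = 2048ULL * pmd64; /* region third table -> 4 TB */

        assert(pgd31 == 1ULL << 31);
        assert(pgd64 == 1ULL << 42);
        printf("31 bit: %llu GB, 64 bit: %llu TB\n", pgd31 >> 30, pgd64 >> 40);
        return 0;
}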
#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
#ifndef __s390x__
#define VMALLOC_START   0x78000000UL
#define VMALLOC_END     0x7e000000UL
#define VMEM_MAP_MAX    0x80000000UL
#else /* __s390x__ */
#define VMALLOC_START   0x3e000000000UL
#define VMALLOC_END     0x3e040000000UL
#define VMEM_MAP_MAX    0x40000000000UL
#endif /* __s390x__ */

#define VMEM_MAP        ((struct page *) VMALLOC_END)
#define VMEM_MAP_SIZE   ((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))

/*
 * A 31 bit pagetable entry of S390 has the following format:
 * |      PFRA        |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has the following format:
 * |   P-table origin    |  |PTL
 * 0                      IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I   Segment-Invalid Bit: Segment is not available for address-translation
 * C   Common-Segment Bit:  Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:   Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has the following format:
 *
 * |S-table origin   |     | STL |
 * X                 **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X   Space-Switch event:
 * G   Segment-Invalid Bit:  *
 * P   Private-Space Bit:    Segment is not private (PoP 3-30)
 * S   Storage-Alteration:
 * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                        PFRA                      |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segmenttable entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I  Segment-Invalid Bit: Segment is not available for address-translation
 * C  Common-Segment Bit:  Segment is not private (PoP 3-30)
 * P  Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I  Segment-Invalid Bit: Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_RO        0x200           /* HW read-only bit  */
#define _PAGE_INVALID   0x400           /* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT       0x001           /* SW pte type bit t */
#define _PAGE_SWX       0x002           /* SW pte type bit x */

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY        0x400
#define _PAGE_TYPE_NONE         0x401
#define _PAGE_TYPE_SWAP         0x403
#define _PAGE_TYPE_FILE         0x601   /* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO           0x200
#define _PAGE_TYPE_RW           0x000
#define _PAGE_TYPE_EX_RO        0x202
#define _PAGE_TYPE_EX_RW        0x002

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses ptep_invalidate to
 * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *                      irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY     1000   ->   1000
 * _PAGE_TYPE_NONE      1001   ->   1001
 * _PAGE_TYPE_SWAP      1011   ->   1011
 * _PAGE_TYPE_FILE      11?1   ->   11?1
 * _PAGE_TYPE_RO        0100   ->   1100
 * _PAGE_TYPE_RW        0000   ->   1000
 * _PAGE_TYPE_EX_RO     0110   ->   1110
 * _PAGE_TYPE_EX_RW     0010   ->   1010
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
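/*
 * Editorial note: a stand-alone sketch, not part of this header, that
 * replays the truth table above. The my_* predicates are local stand-ins
 * for the pte_none/pte_present/pte_file functions defined later in this
 * file; the second column models what ipte leaves behind (hw invalid bit
 * set, software bits untouched).
 */
#include <stdio.h>

#define RO      0x200
#define INVALID 0x400
#define SWT     0x001
#define SWX     0x002

static int my_pte_none(unsigned long v) { return (v & INVALID) && !(v & SWT); }
static int my_pte_present(unsigned long v)
{
        return (v & (RO | INVALID | SWT | SWX)) == 0x401 || /* _PAGE_TYPE_NONE */
               (!(v & INVALID) && !(v & SWT));
}
static int my_pte_file(unsigned long v)
{
        return (v & (RO | INVALID | SWT)) == 0x601;         /* _PAGE_TYPE_FILE */
}

int main(void)
{
        unsigned long types[] = { 0x400, 0x401, 0x403, 0x601,
                                  0x200, 0x000, 0x202, 0x002 };
        for (int i = 0; i < 8; i++) {
                unsigned long v = types[i];
                printf("%#05lx: none=%d present=%d file=%d | after ipte: none=%d\n",
                       v, my_pte_none(v), my_pte_present(v), my_pte_file(v),
                       my_pte_none(v | INVALID));
        }
        return 0;
}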
#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH      0x80000000UL    /* space switch event       */
#define _ASCE_ORIGIN_MASK       0x7ffff000UL    /* segment table origin     */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control            */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_TABLE_LENGTH      0x7f    /* 128 x 64 entries = 8k            */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN   0x7fffffc0UL    /* page table origin        */
#define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry      */
#define _SEGMENT_ENTRY_COMMON   0x10    /* common segment bit               */
#define _SEGMENT_ENTRY_PTL      0x0f    /* page table length                */

#define _SEGMENT_ENTRY          (_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN            ~0xfffUL/* segment table origin             */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control            */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40    /* space switch event               */
#define _ASCE_REAL_SPACE        0x20    /* real space control               */
#define _ASCE_TYPE_MASK         0x0c    /* asce table type mask             */
#define _ASCE_TYPE_REGION1      0x0c    /* region first table type          */
#define _ASCE_TYPE_REGION2      0x08    /* region second table type         */
#define _ASCE_TYPE_REGION3      0x04    /* region third table type          */
#define _ASCE_TYPE_SEGMENT      0x00    /* segment table type               */
#define _ASCE_TABLE_LENGTH      0x03    /* region table length              */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN    ~0xfffUL/* region/segment table origin      */
#define _REGION_ENTRY_INV       0x20    /* invalid region table entry       */
#define _REGION_ENTRY_TYPE_MASK 0x0c    /* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1   0x0c    /* region first table type          */
#define _REGION_ENTRY_TYPE_R2   0x08    /* region second table type         */
#define _REGION_ENTRY_TYPE_R3   0x04    /* region third table type          */
#define _REGION_ENTRY_LENGTH    0x03    /* region third length              */

#define _REGION1_ENTRY          (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY          (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY          (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffUL/* segment table origin             */
#define _SEGMENT_ENTRY_RO       0x200   /* page protection bit              */
#define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry      */

#define _SEGMENT_ENTRY          (0)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INV)

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS         (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
                                 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED   0x02            /* HW changed bit                   */
#define _PAGE_REFERENCED 0x04           /* HW referenced bit                */

/*
 * Page protection definitions.
 */
#define PAGE_NONE       __pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO         __pgprot(_PAGE_TYPE_RO)
#define PAGE_RW         __pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO      __pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW      __pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL     PAGE_RW
#define PAGE_COPY       PAGE_RO

/*
 * Dependent on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that execute permission currently
 * implies read permission as well.
 */
        /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_RO
#define __P010  PAGE_RO
#define __P011  PAGE_RO
#define __P100  PAGE_EX_RO
#define __P101  PAGE_EX_RO
#define __P110  PAGE_EX_RO
#define __P111  PAGE_EX_RO

#define __S000  PAGE_NONE
#define __S001  PAGE_RO
#define __S010  PAGE_RW
#define __S011  PAGE_RW
#define __S100  PAGE_EX_RO
#define __S101  PAGE_EX_RO
#define __S110  PAGE_EX_RW
#define __S111  PAGE_EX_RW
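/*
 * Editorial note: a stand-alone sketch, not from this header, of how the
 * __Pxxx/__Sxxx tables resolve mmap-style protections. The index bits are
 * xwr, and __P (private, copy-on-write) differs from __S (shared) exactly
 * where write permission is requested. The enum and arrays are local
 * stand-ins for the pgprot values above.
 */
#include <stdio.h>

enum prot { NONE, RO, RW, EX_RO, EX_RW };
static const char *name[] = { "PAGE_NONE", "PAGE_RO", "PAGE_RW",
                              "PAGE_EX_RO", "PAGE_EX_RW" };

static const enum prot P[8] = { NONE, RO, RO, RO, EX_RO, EX_RO, EX_RO, EX_RO };
static const enum prot S[8] = { NONE, RO, RW, RW, EX_RO, EX_RO, EX_RW, EX_RW };

int main(void)
{
        /* index is xwr, so PROT_READ|PROT_WRITE -> 011 -> 3 */
        printf("private rw -> %s (write faults trigger COW)\n", name[P[3]]);
        printf("shared  rw -> %s\n", name[S[3]]);
        return 0;
}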
#ifndef __s390x__
# define PxD_SHADOW_SHIFT 1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT 2
#endif /* __s390x__ */

static inline struct page *get_shadow_page(struct page *page)
{
        if (s390_noexec && page->index)
                return virt_to_page((void *)(addr_t) page->index);
        return NULL;
}

static inline void *get_shadow_pte(void *table)
{
        unsigned long addr, offset;
        struct page *page;

        addr = (unsigned long) table;
        offset = addr & (PAGE_SIZE - 1);
        page = virt_to_page((void *)(addr ^ offset));
        return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

static inline void *get_shadow_table(void *table)
{
        unsigned long addr, offset;
        struct page *page;

        addr = (unsigned long) table;
        offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
        page = virt_to_page((void *)(addr ^ offset));
        return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *pteptr, pte_t pteval)
{
        pte_t *shadow_pte = get_shadow_pte(pteptr);

        *pteptr = pteval;
        if (shadow_pte) {
                if (!(pte_val(pteval) & _PAGE_INVALID) &&
                    (pte_val(pteval) & _PAGE_SWX))
                        pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
                else
                        pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
        }
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)    { return 0; }
static inline int pud_bad(pud_t pud)     { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud)
{
        return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
        return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
        unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
        return (pud_val(pud) & mask) != _REGION3_ENTRY;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
        unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
        return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
        return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
        unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
        return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
                (!(pte_val(pte) & _PAGE_INVALID) &&
                 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
        unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
        return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
        /* A pte is neither clean nor dirty on s/390. The dirty bit
         * is in the storage key. See page_test_and_clear_dirty for
         * details.
         */
        return 0;
}

static inline int pte_young(pte_t pte)
{
        /* A pte is neither young nor old on s/390. The young bit
         * is in the storage key. See page_test_and_clear_young for
         * details.
         */
        return 0;
}
/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)          do { } while (0)
#define pud_clear(pud)          do { } while (0)

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
        pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
        pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
        pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
        pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
}

#else /* __s390x__ */

#define pgd_clear(pgd)          do { } while (0)

static inline void pud_clear_kernel(pud_t *pud)
{
        pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t * pud)
{
        pud_t *shadow = get_shadow_table(pud);

        pud_clear_kernel(pud);
        if (shadow)
                pud_clear_kernel(shadow);
}

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
        pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

#endif /* __s390x__ */

static inline void pmd_clear(pmd_t * pmdp)
{
        pmd_t *shadow_pmd = get_shadow_table(pmdp);

        pmd_clear_kernel(pmdp);
        if (shadow_pmd)
                pmd_clear_kernel(shadow_pmd);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t *shadow_pte = get_shadow_pte(ptep);

        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
        if (shadow_pte)
                pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) &= PAGE_MASK;
        pte_val(pte) |= pgprot_val(newprot);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        /* Do not clobber _PAGE_TYPE_NONE pages!  */
        if (!(pte_val(pte) & _PAGE_INVALID))
                pte_val(pte) |= _PAGE_RO;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_RO;
        return pte;
}
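/*
 * Editorial note: a stand-alone sketch, not from this header, of the
 * pte_wrprotect subtlety noted above. The RO bit is only set for valid
 * ptes, so a _PAGE_TYPE_NONE pte (0x401, hw invalid bit set) passes
 * through unchanged while a read-write pte (0x000) becomes read-only
 * (0x200). my_wrprotect is a local stand-in for pte_wrprotect.
 */
#include <assert.h>

#define RO      0x200
#define INVALID 0x400

static unsigned long my_wrprotect(unsigned long v)
{
        if (!(v & INVALID))     /* do not clobber _PAGE_TYPE_NONE */
                v |= RO;
        return v;
}

int main(void)
{
        assert(my_wrprotect(0x000) == 0x200);   /* RW becomes RO */
        assert(my_wrprotect(0x401) == 0x401);   /* NONE stays NONE */
        return 0;
}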
static inline pte_t pte_mkclean(pte_t pte)
{
        /* The only user of pte_mkclean is the fork() code.
           We must *not* clear the *physical* page dirty bit
           just because fork() wants to clear the dirty bit in
           *one* of the page's mappings. So we just do nothing. */
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        /* We do not explicitly set the dirty bit because the
         * sske instruction is slow. It is faster to let the
         * next instruction set the dirty bit.
         */
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        /* S/390 doesn't keep its dirty/referenced bit in the pte.
         * There is no point in clearing the real referenced bit.
         */
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        /* S/390 doesn't keep its dirty/referenced bit in the pte.
         * There is no point in setting the real referenced bit.
         */
        return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
{
        return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
                /* S390 has 1mb segments, we are emulating 4MB segments */
                pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
                /* ipte in zarch mode can do the math */
                pte_t *pto = ptep;
#endif
                asm volatile(
                        "       ipte %2,%3"
                        : "=m" (*ptep) : "m" (*ptep),
                          "a" (pto), "a" (address));
        }
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
{
        __ptep_ipte(address, ptep);
        ptep = get_shadow_pte(ptep);
        if (ptep)
                __ptep_ipte(address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)                     \
({                                                                      \
        pte_t __pte = *(__ptep);                                        \
        if (atomic_read(&(__mm)->mm_users) > 1 ||                       \
            (__mm) != current->active_mm)                               \
                ptep_invalidate(__address, __ptep);                     \
        else                                                            \
                pte_clear((__mm), (__address), (__ptep));               \
        __pte;                                                          \
})

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        ptep_invalidate(address, ptep);
        return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr,
                                            pte_t *ptep, int full)
{
        pte_t pte = *ptep;

        if (full)
                pte_clear(mm, addr, ptep);
        else
                ptep_invalidate(addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)                        \
({                                                                      \
        pte_t __pte = *(__ptep);                                        \
        if (pte_write(__pte)) {                                         \
                if (atomic_read(&(__mm)->mm_users) > 1 ||               \
                    (__mm) != current->active_mm)                       \
                        ptep_invalidate(__addr, __ptep);                \
                set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
        }                                                               \
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({                                                                      \
        int __changed = !pte_same(*(__ptep), __entry);                  \
        if (__changed) {                                                \
                ptep_invalidate(__addr, __ptep);                        \
                set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);    \
        }                                                               \
        __changed;                                                      \
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
        return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
        page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}

/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
        unsigned long physpage = page_to_phys(page);
        int ccode;

        asm volatile(
                "       rrbe    0,%1\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (ccode) : "a" (physpage) : "cc" );
        return ccode & 2;
}
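/*
 * Editorial note: a sketch, not from this header, of the intended calling
 * pattern for the split page_test_dirty/page_clear_dirty pair, modelled on
 * the generic mm code of this era. transfer_dirty_bit is a hypothetical
 * helper name; set_page_dirty is the generic kernel function that moves
 * the information into the struct page flags.
 */
static inline void transfer_dirty_bit(struct page *page)
{
        if (page_test_dirty(page)) {
                page_clear_dirty(page);
                set_page_dirty(page);   /* hw changed bit -> PG_dirty */
        }
}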
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;
        pte_val(__pte) = physpage + pgprot_val(pgprot);
        return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        unsigned long physpage = page_to_phys(page);

        return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) pud_deref(*pud);
        return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
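/*
 * Editorial note: a sketch, not from this header, of how the offset
 * macros above compose into a software page-table walk, in the style of
 * the generic mm code. walk_to_pte is a hypothetical helper name; error
 * handling is reduced to NULL returns.
 */
static inline pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;
        pud = pud_offset(pgd, addr);    /* folded on 31 bit: pud == pgd */
        if (pud_none(*pud) || pud_bad(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;
        return pte_offset_map(pmd, addr);
}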
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;
        offset &= __SWP_OFFSET_MASK;
        pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
                ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
        return pte;
}

#define __swp_type(entry)       (((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)     (((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })

#ifndef __s390x__
# define PTE_FILE_MAX_BITS      26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS      59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
        ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
        ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
                   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()    do { } while (0)

#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */
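/*
 * Editorial note: a stand-alone round-trip sketch, not part of the header,
 * of the swap-entry encoding above. pack() is a local stand-in for
 * mk_swap_pte, and the asserts replay the __swp_type/__swp_offset shifts:
 * packing a (type, offset) pair and unpacking it returns the original
 * values.
 */
#include <assert.h>

#define PAGE_TYPE_SWAP 0x403UL

static unsigned long pack(unsigned long type, unsigned long offset)
{
        return PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
               ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
}

int main(void)
{
        unsigned long type = 3, offset = 0x12345;
        unsigned long val = pack(type, offset);

        assert(((val >> 2) & 0x1f) == type);                /* __swp_type   */
        assert(((val >> 11) | ((val >> 7) & 1)) == offset); /* __swp_offset */
        return 0;
}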