/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (half of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE		((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END		PAGE_OFFSET
#define VMALLOC_START		(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#ifdef CONFIG_64BIT
#define VA_BITS		(pgtable_l5_enabled ? \
				57 : (pgtable_l4_enabled ? 48 : 39))
#else
#define VA_BITS		32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)
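/*
 * Worked example (a sketch, assuming Sv39 with 4K pages and
 * STRUCT_PAGE_MAX_SHIFT == 6, i.e. a struct page of at most 64 bytes):
 * half the virtual address space is 2^38 bytes, or 2^26 pages, and
 * VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32, so VMEMMAP_SIZE = 4GiB,
 * exactly enough for 2^26 64-byte struct pages placed directly
 * below VMALLOC_START.
 */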
#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_32M
#define XIP_OFFSET_MASK		(SZ_32M - 1)
#else
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= CONFIG_XIP_PHYS_ADDR &&					\
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?			\
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET : \
		__a;							\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
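/*
 * Worked example (a sketch; the config values are hypothetical): with
 * CONFIG_XIP_PHYS_ADDR = 0x20000000 (flash), CONFIG_PHYS_RAM_BASE =
 * 0x80000000 and XIP_OFFSET = 32M, an address linked 48M into flash
 * falls inside the 2 * XIP_OFFSET window, so XIP_FIXUP() relocates it
 * to 0x80000000 + 48M - 32M, i.e. 16M into RAM, where the writable
 * half of the XIP image was copied. Anything outside the window is
 * returned unchanged.
 */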
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * When splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return __page_val_to_pfn(pte_val(pte));
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
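/*
 * Worked example (a sketch): _PAGE_PFN_SHIFT is 10, so
 * pfn_pte(0x80200, PAGE_KERNEL) produces (0x80200 << 10) | _PAGE_KERNEL:
 * the PPN sits in bits 10 and up and the V/R/W/A/D/G flags occupy the
 * low bits, matching the Sv39/Sv48 PTE layout. pte_pfn() inverts this
 * by masking with _PAGE_PFN_MASK and shifting back down.
 */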
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}
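/*
 * Usage sketch (illustrative, not taken from kernel code): write
 * protecting an entry, e.g. for copy-on-write, keeps the PFN and the
 * accessed/dirty state while replacing the protection bits:
 *
 *	pte_t pte = *ptep;
 *	pte = pte_modify(pte, PAGE_READ);	// or pte_wrprotect(pte)
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * _PAGE_CHG_MASK is what preserves the PFN and the sticky state bits
 * across pte_modify().
 */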
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	page_table_check_pte_set(mm, addr, ptep, pteval);
	__set_pte_at(mm, addr, ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, address, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, address, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bits      1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bits      6 to 10:	swap type
 *	bits 11 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	6
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
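/*
 * Worked example (a sketch): __swp_entry(1, 0x100) packs type 1 into
 * bits 6-10 and offset 0x100 into bits 11 and up, giving
 * (1 << 6) | (0x100 << 11) = 0x80040. Bit 0 (_PAGE_PRESENT) and bit 5
 * (_PAGE_PROT_NONE) remain zero, so a swap PTE is never mistaken for a
 * present mapping.
 */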
/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -     0x9fc00000 (~2.5GB) for RV32.
 * -   0x4000000000 ( 256GB) for RV64 using SV39 mmu
 * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#define TASK_SIZE_MIN	TASK_SIZE
#endif
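/*
 * Worked example (a sketch): under Sv39, PGDIR_SIZE is 1GiB and
 * PTRS_PER_PGD is 512, so TASK_SIZE_64 = 1GiB * 512 / 2 = 256GiB
 * (0x4000000000), the lower half of the 39-bit address space; the
 * same formula yields 128TiB (0x800000000000) under Sv48.
 */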
#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

#define kern_addr_valid(addr)   (1) /* FIXME */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
extern bool pgtable_l4_enabled;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */
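/*
 * Usage sketch (illustrative; the walk helpers come from
 * include/linux/pgtable.h, which this header parameterizes, and a real
 * walk must also check p*d_none()/p*d_leaf() at each level):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);
 *	pud_t *pud = pud_offset(p4d, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 *	if (pte_present(*pte) && pte_write(*pte))
 *		set_pte_at(mm, addr, pte, pte_wrprotect(*pte));
 *
 * On Sv39 the p4d and pud levels are folded at runtime
 * (pgtable_l4_enabled and pgtable_l5_enabled are false), so those
 * steps are pass-throughs.
 */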