/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#ifdef CONFIG_RELOCATABLE
#define KERNEL_LINK_ADDR	UL(0)
#else
#define KERNEL_LINK_ADDR	_AC(CONFIG_PHYS_RAM_BASE, UL)
#endif
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE	((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	PAGE_OFFSET
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#else
#define MODULES_VADDR		VMALLOC_START
#define MODULES_END		VMALLOC_END
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS		(pgtable_l5_enabled ? \
				VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS		VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if the kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START - vmemmap_start_pfn)

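/*
 * Worked example of the sizing above, assuming Sv39 (VA_BITS == 39),
 * 4 KiB pages (PAGE_SHIFT == 12) and a 64-byte struct page
 * (STRUCT_PAGE_MAX_SHIFT == 6):
 *
 *	VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32
 *	VMEMMAP_SIZE  = 1UL << 32      = 4 GiB
 *
 * i.e. one 2^6-byte struct page for each of the 2^26 pages that cover
 * half of the 2^39-byte virtual address space.
 */
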
#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifndef __ASSEMBLER__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	extern char _sdata[], _start[], _end[];				\
	uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR		\
			+ (uintptr_t)&_sdata - (uintptr_t)&_start;	\
	uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR			\
			+ (uintptr_t)&_end - (uintptr_t)&_start;	\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= __rom_start_data && __a < __rom_end_data) ?		\
		__a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a;	\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */

struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __meminitdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY \
				| _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

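/*
 * Note that none of the protections above sets _PAGE_WRITE without
 * _PAGE_READ: the RISC-V privileged specification reserves the
 * write-only encoding (W=1, R=0), so PAGE_WRITE and PAGE_SHARED are
 * read/write rather than write-only mappings.
 */
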
#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_KERNEL_NC	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_NOCACHE)
#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

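/*
 * Illustrative example of the Svnapot encoding implemented by
 * pte_mknapot() above, assuming 4 KiB base pages and
 * _PAGE_PFN_SHIFT == 10: for order 4 (a 16-page, 64 KiB range),
 * pos = 4 - 1 + 10 = 13, so the low four PFN bits are cleared and PFN
 * bit 3 is set, i.e. ppn[3:0] = 0b1000, the Svnapot encoding for a
 * naturally aligned 64 KiB region. pte_pfn() reverses this:
 * res & (res - 1) clears the lowest set bit (bit 3 here), recovering
 * the 16-page-aligned base PFN.
 */
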
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROT_NONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pgtable_supports_uffd_wp() \
	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B)

static inline bool pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(__pte(pte_val(pte) | _PAGE_UFFD_WP));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_UFFD_WP));
}

static inline bool pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pgtable_supports_soft_dirty()				\
	(IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) &&			\
	 riscv_has_extension_unlikely(RISCV_ISA_EXT_SVRSW60T59B))

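/*
 * Presumably both _PAGE_UFFD_WP and _PAGE_SOFT_DIRTY live in the PTE
 * bits that Svrsw60t59b reserves for software use (bits 60 and 59),
 * which is why both pgtable_supports_uffd_wp() and
 * pgtable_supports_soft_dirty() are gated on that extension. Note also
 * that pte_mkuffd_wp() additionally clears _PAGE_WRITE: the marker only
 * takes effect if a subsequent write actually faults.
 */
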
static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SOFT_DIRTY));
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte)	(pte_napot(pte) ?			\
				 napot_cont_size(napot_cont_order(pte)) :\
				 PAGE_SIZE)
#endif

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * Svvptc guarantees that the new valid pte will be visible within
	 * a bounded timeframe, so when the uarch does not cache invalid
	 * entries, we don't have to do anything.
	 */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SVVPTC))
		return;

	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define update_mmu_tlb_range(vma, addr, ptep, nr) \
	update_mmu_cache_range(NULL, vma, addr, ptep, nr)

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

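/*
 * A minimal sketch of what the fallback above amounts to on a
 * non-Svvptc core with nr == 1: once the fault handler has written the
 * new PTE, update_mmu_cache() effectively issues
 *
 *	sfence.vma address
 *
 * on the local hart, so a TLB that cached the old invalid entry cannot
 * keep raising spurious faults on the now-valid translation.
 */
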
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	WRITE_ONCE(*ptep, pteval);
}

void flush_icache_pte(struct mm_struct *mm, pte_t pte);

static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(mm, pteval);

	set_pte(ptep, pteval);
}

#define PFN_PTE_SHIFT	_PAGE_PFN_SHIFT

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG	/* defined in mm/pgtable.c */
extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
				     pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
#ifdef CONFIG_SMP
	pte_t pte = __pte(xchg(&ptep->pte, 0));
#else
	pte_t pte = *ptep;

	set_pte(ptep, __pte(0));
#endif

	page_table_check_pte_clear(mm, pte);

	return pte;
}

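/*
 * The xchg() above is what makes the SMP variant safe: with hardware
 * A/D-bit updating (e.g. Svadu), the MMU may set _PAGE_ACCESSED or
 * _PAGE_DIRTY concurrently, and a non-atomic load-then-store could
 * lose such an update between the load and the store. On !SMP a plain
 * load followed by set_pte() is sufficient.
 */
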
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}

#define pgprot_dmacoherent pgprot_writecombine

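/*
 * pgprot_noncached() and pgprot_writecombine() above select the
 * non-default Svpbmt memory types (rewritten to the T-Head PMA
 * equivalents by ALT_THEAD_PMA when the PTE value is built): _PAGE_IO
 * yields strongly-ordered, non-cacheable, non-idempotent I/O mappings,
 * while _PAGE_NOCACHE yields non-cacheable but weakly-ordered
 * (write-combinable) main-memory mappings.
 */
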
/*
 * Both Svade and Svadu control the hardware behavior when the PTE A/D
 * bits need to be set. By default, the M-mode firmware enables the
 * hardware updating scheme when only Svadu is present in the DT.
 */
#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)  (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)  (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	return pte_pgprot(pmd_pte(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	return pte_pgprot(pud_pte(pud));
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pte_write(pud_pte(pud));
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

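/*
 * Most of the pmd_*() and pud_*() helpers in this file simply convert
 * the entry to a pte_t and reuse the pte_*() implementation: RISC-V
 * uses the same leaf-entry layout at every level of the page table, a
 * PMD or PUD leaf differing from a PTE only in the alignment required
 * of its PFN.
 */
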
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
static inline bool pmd_special(pmd_t pmd)
{
	return pte_special(pmd_pte(pmd));
}

static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
}
#endif

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
static inline bool pud_special(pud_t pud)
{
	return pte_special(pud_pte(pud));
}

static inline pud_t pud_mkspecial(pud_t pud)
{
	return pte_pud(pte_mkspecial(pud_pte(pud)));
}
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline bool pmd_uffd_wp(pmd_t pmd)
{
	return pte_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)));
}

static inline bool pmd_swp_uffd_wp(pmd_t pmd)
{
	return pte_swp_uffd_wp(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pmd_soft_dirty(pmd_t pmd)
{
	return pte_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline bool pmd_swp_soft_dirty(pmd_t pmd)
{
	return pte_swp_soft_dirty(pmd_pte(pmd));
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)));
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)));
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
#ifdef CONFIG_SMP
	pmd_t pmd = __pmd(xchg(&pmdp->pmd, 0));
#else
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
#endif

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pte_pud(pte_wrprotect(pud_pte(pud)));
}

static inline int pud_trans_huge(pud_t pud)
{
	return pud_leaf(pud);
}

static inline int pud_dirty(pud_t pud)
{
	return pte_dirty(pud_pte(pud));
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pte_pud(pte_mkyoung(pud_pte(pud)));
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pte_pud(pte_mkold(pud_pte(pud)));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pte_pud(pte_mkdirty(pud_pte(pud)));
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pte_pud(pte_mkclean(pud_pte(pud)));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pte_pud(pte_mkwrite_novma(pud_pte(pud)));
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud;
}

static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pudp, pud_pte(entry), dirty);
}

static inline int pudp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pudp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pud_t *pudp)
{
#ifdef CONFIG_SMP
	pud_t pud = __pud(xchg(&pudp->pud, 0));
#else
	pud_t pud = *pudp;

	pud_clear(pudp);
#endif

	page_table_check_pud_clear(mm, pud);

	return pud;
}

static inline int pud_young(pud_t pud)
{
	return pte_young(pud_pte(pud));
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
				unsigned long address, pud_t *pudp)
{
	pte_t *ptep = (pte_t *)pudp;

	update_mmu_cache(vma, address, ptep);
}

static inline pud_t pudp_establish(struct vm_area_struct *vma,
				unsigned long address, pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(vma->vm_mm, pudp, pud);
	return __pud(atomic_long_xchg((atomic_long_t *)pudp, pud_val(pud)));
}

static inline pud_t pud_mkinvalid(pud_t pud)
{
	return __pud(pud_val(pud) & ~(_PAGE_PRESENT | _PAGE_PROT_NONE));
}

extern pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pud_t *pudp);

static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
{
	return pte_pud(pte_modify(pud_pte(pud), newprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bits      1 to 2:	(zero)
 *	bit            3:	_PAGE_SWP_SOFT_DIRTY
 *	bit            4:	_PAGE_SWP_UFFD_WP
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

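/*
 * Worked example of the encoding above (values illustrative): for swap
 * type 2 and offset 0x1234,
 *
 *	__swp_entry(2, 0x1234) = (2 << 7) | (0x1234 << 12) = 0x1234100
 *
 * which leaves bit 0 (_PAGE_PRESENT) and bit 5 (_PAGE_PROT_NONE) clear,
 * so the resulting PTE is !pte_none() && !pte_present() and is
 * therefore recognized as a swap PTE.
 */
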
static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -        0x9fc00000	(~2.5GB) for RV32.
 * -      0x4000000000	( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000	( 128TB) for RV64 using SV48 mmu
 * - 0x100000000000000	(  64PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63–57 must be equal to bit 56.
 */
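/*
 * For example, with Sv39 (PGDIR_SIZE == 1 GiB, PTRS_PER_PGD == 512),
 * TASK_SIZE_64 below evaluates to
 *
 *	(1UL << 30) * 512 / 2 = 0x4000000000
 *
 * i.e. the lower half of the 39-bit address space, matching the table
 * above.
 */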
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		_AC(-1, UL)
#define VMALLOC_START		_AC(0, UL)
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
#endif /* !__ASSEMBLER__ */

#endif /* _ASM_RISCV_PGTABLE_H */