/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLER__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <asm/por.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>

static inline void emit_pte_barriers(void)
{
	/*
	 * These barriers are emitted under certain conditions after a pte entry
	 * was modified (see e.g. __set_pte_complete()). The dsb makes the store
	 * visible to the table walker. The isb ensures that any previous
	 * speculative "invalid translation" marker that is in the CPU's
	 * pipeline gets cleared, so that any access to that address after
	 * setting the pte to valid won't cause a spurious fault. If the thread
	 * gets preempted after storing to the pgtable but before emitting these
	 * barriers, __switch_to() emits a dsb which ensures the walker gets to
	 * see the store. There is no guarantee of an isb being issued though.
	 * This is safe because it will still get issued (albeit on a
	 * potentially different CPU) when the thread starts running again,
	 * before any access to the address.
	 */
	dsb(ishst);
	isb();
}

static inline void queue_pte_barriers(void)
{
	if (is_lazy_mmu_mode_active()) {
		/* Avoid the atomic op if already set. */
		if (!test_thread_flag(TIF_LAZY_MMU_PENDING))
			set_thread_flag(TIF_LAZY_MMU_PENDING);
	} else {
		emit_pte_barriers();
	}
}

static inline void arch_enter_lazy_mmu_mode(void) {}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
		emit_pte_barriers();
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	arch_flush_lazy_mmu_mode();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
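/*
 * Worked example (illustrative only) of the lazy MMU batching implemented by
 * queue_pte_barriers() and arch_flush_lazy_mmu_mode() above: with lazy MMU
 * mode active, a sequence of N kernel pte updates costs a single barrier pair
 * instead of N:
 *
 *	store pte[0]   -> queue_pte_barriers(): sets TIF_LAZY_MMU_PENDING
 *	store pte[1]   -> queue_pte_barriers(): flag already set, no atomic op
 *	...
 *	store pte[N-1] -> queue_pte_barriers(): no-op
 *	leave lazy MMU -> arch_flush_lazy_mmu_mode(): one dsb(ishst) + isb
 *
 * If the task is preempted in the middle, __switch_to()'s dsb still makes the
 * stores visible to the table walker before the task can run elsewhere.
 */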
/*
 * We use a local TLB invalidation instruction when reusing a page in the
 * write protection fault handler to avoid TLBI broadcast in the hot
 * path. This will cause spurious page faults if stale read-only TLB
 * entries exist.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep)	\
	local_flush_tlb_page_nonotify(vma, address)

#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp)	\
	local_flush_tlb_page_nonotify(vma, address)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return pte_val(pte) & PTE_ADDR_LOW;
}

static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return phys;
}
#endif

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define __pte_clear(mm, addr, ptep) \
	__set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_valid(pte) || pte_present_invalid(pte))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_present_invalid(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID)
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Returns true if the pte is valid and has the contiguous bit set.
 */
#define pte_valid_cont(pte)	(pte_valid(pte) && pte_cont(pte))
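/*
 * Example (illustrative only): with CONFIG_ARM64_PA_BITS_52 the top bits of a
 * physical address do not fit in the pte's low address field, so
 * __phys_to_pte_val() folds them into the high field (shifted down by
 * PTE_ADDR_HIGH_SHIFT) and __pte_to_phys() unfolds them again. For any
 * page-aligned address the configuration can map, the round trip through
 * pfn_pte()/pte_pfn() is lossless:
 */
#if 0	/* sketch only, not built */
static inline void example_pa52_round_trip(phys_addr_t pa)	/* page aligned */
{
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);

	WARN_ON(__pte_to_phys(pte) != pa);
	WARN_ON(pte_pfn(pte) != pa >> PAGE_SHIFT);
}
#endif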
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute)
{
	u64 por;

	if (!system_supports_poe())
		return true;

	por = read_sysreg_s(SYS_POR_EL0);

	if (write)
		return por_elx_allows_write(por, pkey);

	if (execute)
		return por_elx_allows_exec(por, pkey);

	return por_elx_allows_read(por, pkey);
}

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). Execute-only
 * mappings, such as PROT_EXEC with EPAN (both PTE_USER and PTE_UXN
 * bits clear), must return false. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted_no_overlay(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pte_access_permitted(pte, write) \
	(pte_access_permitted_no_overlay(pte, write) && \
	 por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	if (pte_sw_dirty(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkvalid(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pte_t pte_mkinvalid(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
	pte = clear_pte_bit(pte, __pgprot(PTE_VALID));
	return pte;
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pmd_t pmd_mknoncont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP)));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void __set_pte_complete(pte_t pte)
{
	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * has the necessary barriers.
	 */
	if (pte_valid_not_user(pte))
		queue_pte_barriers();
}

static inline void __set_pte(pte_t *ptep, pte_t pte)
{
	__set_pte_nosync(ptep, pte);
	__set_pte_complete(pte);
}

static inline pte_t __ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(pteval_t old, pteval_t new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *     0       0     |     1           0           0
 *     0       1     |     1           1           0
 *     1       0     |     1           0           1
 *     1       1     |     0           1           x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
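/*
 * Example (illustrative only): how the helpers above move dirty state between
 * the hardware encoding (PTE_WRITE set, PTE_RDONLY clear) and the software
 * PTE_DIRTY bit, so that write-protecting a pte never loses dirtiness:
 */
#if 0	/* sketch only, not built */
static inline void example_dbm_transitions(pte_t pte)
{
	/* Dirty + writable: hardware-dirty form, PTE_RDONLY ends up clear. */
	pte = pte_mkwrite_novma(pte_mkdirty(pte));
	WARN_ON(!pte_dirty(pte));

	/* Write-protecting moves the information into the sw PTE_DIRTY bit. */
	pte = pte_wrprotect(pte);
	WARN_ON(!pte_dirty(pte) || pte_write(pte));

	/* pte_mkclean() drops both forms of dirtiness. */
	WARN_ON(pte_dirty(pte_mkclean(pte)));
}
#endif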
static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = __ptep_get(ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (__ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted_no_overlay() returns false for exec only
	 * mappings, they don't expose tags (instruction fetches don't check
	 * tags).
	 */
	if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

#define pte_advance_pfn pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TYPE_MASK) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	/*
	 * pte_present_invalid() tells us that the pte is invalid from HW
	 * perspective but present from SW perspective, so the fields are to be
	 * interpreted as per the HW layout. The other two checks are the
	 * unique encoding that we use for PROT_NONE. It is insufficient to
	 * only use the first check because we share the same encoding scheme
	 * with pmds which support pmd_mkinvalid(), so can be present-invalid
	 * without being PROT_NONE.
	 */
	return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pmd_uffd_wp(pmd)	pte_uffd_wp(pmd_pte(pmd))
#define pmd_mkuffd_wp(pmd)	pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)))
#define pmd_clear_uffd_wp(pmd)	pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)))
#define pmd_swp_uffd_wp(pmd)	pte_swp_uffd_wp(pmd_pte(pmd))
#define pmd_swp_mkuffd_wp(pmd)	pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)))
#define pmd_swp_clear_uffd_wp(pmd) \
	pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	/*
	 * It's possible that the pmd is present-invalid on entry
	 * and in that case it needs to remain present-invalid on
	 * exit. So ensure the VALID bit does not get modified.
	 */
	pmdval_t mask = PMD_TYPE_MASK & ~PTE_VALID;
	pmdval_t val = PMD_TYPE_SECT & ~PTE_VALID;

	return __pmd((pmd_val(pmd) & ~mask) | val);
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
#define pmd_special(pte)	(!!((pmd_val(pte) & PTE_SPECIAL)))
static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
}
#endif

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))
static inline pud_t pud_mkhuge(pud_t pud)
{
	/*
	 * It's possible that the pud is present-invalid on entry
	 * and in that case it needs to remain present-invalid on
	 * exit. So ensure the VALID bit does not get modified.
	 */
	pudval_t mask = PUD_TYPE_MASK & ~PTE_VALID;
	pudval_t val = PUD_TYPE_SECT & ~PTE_VALID;

	return __pud((pud_val(pud) & ~mask) | val);
}

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	unsigned long pfn = pud_pfn(pud);

	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
}

static inline void __set_ptes_anysz(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, unsigned int nr,
				    unsigned long pgsize)
{
	unsigned long stride = pgsize >> PAGE_SHIFT;

	switch (pgsize) {
	case PAGE_SIZE:
		page_table_check_ptes_set(mm, addr, ptep, pte, nr);
		break;
	case PMD_SIZE:
		page_table_check_pmds_set(mm, addr, (pmd_t *)ptep,
					  pte_pmd(pte), nr);
		break;
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		page_table_check_puds_set(mm, addr, (pud_t *)ptep,
					  pte_pud(pte), nr);
		break;
#endif
	default:
		VM_WARN_ON(1);
	}

	__sync_cache_and_tags(pte, nr * stride);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		__set_pte_nosync(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_advance_pfn(pte, stride);
	}

	__set_pte_complete(pte);
}

static inline void __set_ptes(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte, unsigned int nr)
{
	__set_ptes_anysz(mm, addr, ptep, pte, nr, PAGE_SIZE);
}

static inline void __set_pmds(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
	__set_ptes_anysz(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
}
#define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)

static inline void __set_puds(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud, unsigned int nr)
{
	__set_ptes_anysz(mm, addr, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
}
#define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

#define pgprot_decrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, PROT_NS_SHARED)
#define pgprot_encrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, 0)
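/*
 * Example (illustrative only): __set_ptes_anysz() advances the output address
 * by one pfn 'stride' per entry and emits the pte barriers only once, for the
 * last entry, via __set_pte_complete(). Mapping 'nr' pages of a physically
 * contiguous, page-aligned buffer therefore only needs the first pte:
 */
#if 0	/* sketch only, not built */
static inline void example_set_pte_range(struct mm_struct *mm,
					 unsigned long addr, pte_t *ptep,
					 phys_addr_t pa, unsigned int nr)
{
	pte_t first = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);

	/* Writes nr entries, pfn increasing by 1 for each PAGE_SIZE step. */
	__set_ptes(mm, addr, ptep, first, nr);
}
#endif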
/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	/*
	 * If pmd is present-invalid, pmd_table() won't detect it
	 * as a table, so force the valid bit for the comparison.
	 */
	return pmd_present(pmd) && !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd))
		queue_pte_barriers();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		((pud_val(pud) & PUD_TYPE_MASK) != \
				 PUD_TYPE_TABLE)
#define pud_present(pud)	pte_present(pud_pte(pud))
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#else
#define pud_leaf(pud)		false
#endif
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline bool pgtable_l4_enabled(void);

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud))
		queue_pte_barriers();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}
/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_valid(pud)		false
#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static __always_inline bool pgtable_l4_enabled(void)
{
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return true;
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_pud_folded(const struct mm_struct *mm)
{
	return !pgtable_l4_enabled();
}
#define mm_pud_folded mm_pud_folded

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)		(pgtable_l4_enabled() && \
				 ((p4d_val(p4d) & P4D_TYPE_MASK) != \
				  P4D_TYPE_TABLE))
#define p4d_present(p4d)	(!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	queue_pte_barriers();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	if (pgtable_l4_enabled())
		set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
{
	/* Ensure that 'p4dp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> P4D_SHIFT) ^ ((u64)p4dp >> 3)) % PTRS_PER_P4D);

	return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
{
	BUG_ON(!pgtable_l4_enabled());

	return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
}

static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
{
	return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
}
#define pud_offset	pud_offset
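/*
 * Example (illustrative only): with CONFIG_ARM64_LPA2 the number of levels is
 * decided at boot, so pgtable_l4_enabled() may be false at runtime. In that
 * case pud_offset_lockless() never dereferences the p4d value; it derives the
 * folded pud slot from the p4d pointer itself via p4d_to_folded_pud(). A page
 * table walk is therefore written identically for both cases:
 */
#if 0	/* sketch only, not built */
static inline pud_t *example_walk_to_pud(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	/* Correct whether or not the pud level really exists. */
	return pud_offset(p4dp, addr);
}
#endif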
static inline pud_t *pud_set_fixmap(unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return NULL;
	return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
}

static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return pud_set_fixmap(pud_offset_phys(p4dp, addr));
}

static inline void pud_clear_fixmap(void)
{
	if (pgtable_l4_enabled())
		clear_fixmap(FIX_PUD);
}

/* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
}

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

#else

static inline bool pgtable_l4_enabled(void) { return false; }

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && \
				 ((pgd_val(pgd) & PGD_TYPE_MASK) != \
				  PGD_TYPE_TABLE))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	queue_pte_barriers();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	/* Ensure that 'pgdp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> PGDIR_SHIFT) ^ ((u64)pgdp >> 3)) % PTRS_PER_PGD);

	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}

static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless
static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}

static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}

/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

#else

static inline bool pgtable_l5_enabled(void) { return false; }

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)		NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

static inline
p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	/*
	 * With runtime folding of the pud, pud_offset_lockless() passes
	 * the 'pgd_t *' we return here to p4d_to_folded_pud(), which
	 * will offset the pointer assuming that it points into
	 * a page-table page. However, the fast GUP path passes us a
	 * pgd_t allocated on the stack and so we must use the original
	 * pointer in 'pgdp' to construct the p4d pointer instead of
	 * using the generic p4d_offset_lockless() implementation.
	 *
	 * Note: reusing the original pointer means that we may
	 * dereference the same (live) page-table entry multiple times.
	 * This is safe because it is still only loaded once in the
	 * context of each level and the CPU guarantees same-address
	 * read-after-read ordering.
	 */
	return p4d_offset(pgdp, addr);
}
#define p4d_offset_lockless p4d_offset_lockless_folded

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE |
			      PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK;

	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	/*
	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
	 * dirtiness again.
	 */
	if (pte_sw_dirty(pte))
		pte = pte_mkdirty(pte);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

extern int __ptep_set_access_flags(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep,
				   pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
				       pmd_pte(entry), dirty);
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
{
	return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
}

static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
	return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
	return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = __ptep_get(ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}
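/*
 * The loop above is the read-modify-write pattern used throughout this file
 * whenever a pte must be updated without losing concurrent hardware AF/DBM
 * updates: retry until cmpxchg observes exactly the value the modification
 * was based on. A generic sketch of the idiom (illustrative only, 'fn' is a
 * hypothetical modifier such as pte_mkold() or pte_wrprotect()):
 */
#if 0	/* sketch only, not built */
static inline void example_pte_rmw(pte_t *ptep, pte_t (*fn)(pte_t))
{
	pte_t old_pte, pte;

	pte = __ptep_get(ptep);
	do {
		old_pte = pte;
		pte = fn(pte);
		/* cmpxchg returns the value found; retry if it changed. */
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}
#endif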
static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
					   unsigned long address, pte_t *ptep)
{
	int young = __ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	/* Operation applies to PMD table entry only if FEAT_HAFT is enabled */
	VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft());
	return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */

static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
					       unsigned long address,
					       pte_t *ptep,
					       unsigned long pgsize)
{
	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));

	switch (pgsize) {
	case PAGE_SIZE:
		page_table_check_pte_clear(mm, address, pte);
		break;
	case PMD_SIZE:
		page_table_check_pmd_clear(mm, address, pte_pmd(pte));
		break;
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		page_table_check_pud_clear(mm, address, pte_pud(pte));
		break;
#endif
	default:
		VM_WARN_ON(1);
	}

	return pte;
}

static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep)
{
	return __ptep_get_and_clear_anysz(mm, address, ptep, PAGE_SIZE);
}

static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		__ptep_get_and_clear(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep,
					      unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = __ptep_get_and_clear(mm, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(__ptep_get_and_clear_anysz(mm, address, (pte_t *)pmdp, PMD_SIZE));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
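/*
 * Example (illustrative only): __get_and_clear_full_ptes() folds the per-page
 * state of a batch into the single pte it returns, i.e. "dirty if any entry
 * was dirty, young if any entry was young". A caller tearing down the ptes of
 * a fully mapped folio can therefore test the result directly; this sketch is
 * assumed to run with the PTL held:
 */
#if 0	/* sketch only, not built */
static inline bool example_folio_was_dirty(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep,
					   unsigned int nr)
{
	pte_t pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, 1);

	return pte_dirty(pte);	/* true if any of the nr ptes was dirty */
}
#endif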
static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep,
					 pte_t pte)
{
	pte_t old_pte;

	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

/*
 * __ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
}

static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
				    pte_t *ptep, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
		__ptep_set_wrprotect(mm, address, ptep);
}

static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t pte, cydp_t flags)
{
	pte_t old_pte;

	do {
		old_pte = pte;

		if (flags & CYDP_CLEAR_YOUNG)
			pte = pte_mkold(pte);
		if (flags & CYDP_CLEAR_DIRTY)
			pte = pte_mkclean(pte);

		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep,
					    unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (;;) {
		pte = __ptep_get(ptep);

		if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
			__set_pte(ptep, pte_mkclean(pte_mkold(pte)));
		else
			__clear_young_dirty_pte(vma, addr, ptep, pte, flags);

		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	__ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit 2:		remember PG_anon_exclusive
 *	bit 3:		remember uffd-wp state
 *	bits 6-10:	swap type
 *	bit 11:		PTE_PRESENT_INVALID (must be zero)
 *	bits 12-61:	swap offset
 */
#define __SWP_TYPE_SHIFT	6
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_OFFSET_BITS	50
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
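/*
 * Worked example of the swap pte layout above (illustrative only): a swap
 * entry with type t and offset o becomes a pte with bits [10:6] = t and
 * bits [61:12] = o, leaving bit 0 (valid) and bit 11 (present-invalid) clear
 * so the pte is recognised as not-present:
 */
#if 0	/* sketch only, not built */
static inline void example_swp_round_trip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);
	pte_t pte = __swp_entry_to_pte(entry);

	WARN_ON(pte_valid(pte) || pte_present_invalid(pte));
	WARN_ON(__swp_type(__pte_to_swp_entry(pte)) != 3);
	WARN_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x1234);
}
#endif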
/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
extern int arch_prepare_to_swap(struct folio *folio);

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
extern void arch_swap_restore(swp_entry_t entry, struct folio *folio);

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the __set_ptes() function.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
		unsigned int nr)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with zeroed
 * page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
#define arch_has_hw_pte_young		cpu_has_hw_af

#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
#define arch_has_hw_nonleaf_pmd_young	system_supports_haft
#endif

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
#define arch_wants_old_prefaulted_pte	cpu_has_hw_af

/*
 * Request that exec memory is read into the pagecache in at least 64K folios.
 * This size can be contpte-mapped when 4K base pages are in use (16 pages into
 * 1 iTLB entry), and HPA can coalesce it (4 pages into 1 TLB entry) when 16K
 * base pages are in use.
 */
#define exec_folio_order() ilog2(SZ_64K >> PAGE_SHIFT)
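/*
 * For example, with 4K base pages exec_folio_order() evaluates to
 * ilog2(SZ_64K >> 12) = ilog2(16) = 4, i.e. order-4 (64K) folios, which is
 * one contpte block in that configuration; with 16K base pages it evaluates
 * to ilog2(4) = 2, the 4-page unit that HPA can coalesce.
 */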
static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}


#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);

#define modify_prot_start_ptes modify_prot_start_ptes
extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    unsigned int nr);

#define modify_prot_commit_ptes modify_prot_commit_ptes
extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
				    pte_t *ptep, pte_t old_pte, pte_t pte,
				    unsigned int nr);

#ifdef CONFIG_ARM64_CONTPTE

/*
 * The contpte APIs are used to transparently manage the contiguous bit in ptes
 * where it is possible and makes sense to do so. The PTE_CONT bit is considered
 * a private implementation detail of the public ptep API (see below).
 */
extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep, pte_t pte);
extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte);
extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte, unsigned int nr);
extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, unsigned int nr, int full);
extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep,
					     unsigned int nr, int full);
extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep);
extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep);
extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, unsigned int nr);
extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t entry, int dirty);
extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   unsigned int nr, cydp_t flags);
static __always_inline void contpte_try_fold(struct mm_struct *mm,
					     unsigned long addr, pte_t *ptep, pte_t pte)
{
	/*
	 * Only bother trying if both the virtual and physical addresses are
	 * aligned and correspond to the last entry in a contig range. The core
	 * code mostly modifies ranges from low to high, so this is likely the
	 * last modification in the contig range, so a good time to fold.
	 * We can't fold special mappings, because there is no associated folio.
	 */

	const unsigned long contmask = CONT_PTES - 1;
	bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;

	if (unlikely(valign)) {
		bool palign = (pte_pfn(pte) & contmask) == contmask;

		if (unlikely(palign &&
		    pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
			__contpte_try_fold(mm, addr, ptep, pte);
	}
}

static __always_inline void contpte_try_unfold(struct mm_struct *mm,
					       unsigned long addr, pte_t *ptep, pte_t pte)
{
	if (unlikely(pte_valid_cont(pte)))
		__contpte_try_unfold(mm, addr, ptep, pte);
}

#define pte_batch_hint pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	if (!pte_valid_cont(pte))
		return 1;

	return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
}
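/*
 * Example (illustrative only): since a pte_t is 8 bytes,
 * ((unsigned long)ptep >> 3) & (CONT_PTES - 1) is the entry's index within
 * its contpte block, so for a valid-cont pte the hint is the number of
 * entries remaining in that block: CONT_PTES for the first entry, 1 for the
 * last, and always 1 for ptes without the contiguous bit.
 */
#if 0	/* sketch only, not built */
static inline void example_batch_hint(pte_t *contpte_block_start, pte_t pte)
{
	/* 'pte' is assumed valid with PTE_CONT set for this illustration. */
	WARN_ON(pte_batch_hint(contpte_block_start, pte) != CONT_PTES);
	WARN_ON(pte_batch_hint(contpte_block_start + CONT_PTES - 1, pte) != 1);
}
#endif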
/*
 * The below functions constitute the public API that arm64 presents to the
 * core-mm to manipulate PTE entries within their page tables (or at least this
 * is the subset of the API that arm64 needs to implement). These public
 * versions will automatically and transparently apply the contiguous bit where
 * it makes sense to do so. Therefore any users that are contig-aware (e.g.
 * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
 * private versions, which are prefixed with double underscore. All of these
 * APIs except for ptep_get_lockless() are expected to be called with the PTL
 * held. Although the contiguous bit is considered private to the
 * implementation, it is deliberately allowed to leak through the getters (e.g.
 * ptep_get()), back to core code. This is required so that pte_leaf_size() can
 * provide an accurate size for perf_get_pgtable_size(). But this leakage means
 * it's possible a pte will be passed to a setter with the contiguous bit set,
 * so we explicitly clear the contiguous bit in those cases to prevent
 * accidentally setting it in the pgtable.
 */

#define ptep_get ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get(ptep, pte);
}

#define ptep_get_lockless ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get_lockless(ptep);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * We don't have the mm or vaddr so cannot unfold contig entries (since
	 * it requires tlb maintenance). set_pte() is not used in core code, so
	 * this should never even be called. Regardless do our best to service
	 * any call and emit a warning if there is any attempt to set a pte on
	 * top of an existing contig range.
	 */
	pte_t orig_pte = __ptep_get(ptep);

	WARN_ON_ONCE(pte_valid_cont(orig_pte));
	__set_pte(ptep, pte_mknoncont(pte));
}

#define set_ptes set_ptes
static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte, unsigned int nr)
{
	pte = pte_mknoncont(pte);

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__set_ptes(mm, addr, ptep, pte, 1);
		contpte_try_fold(mm, addr, ptep, pte);
	} else {
		contpte_set_ptes(mm, addr, ptep, pte, nr);
	}
}

static inline void pte_clear(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	__pte_clear(mm, addr, ptep);
}

#define clear_full_ptes clear_full_ptes
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, unsigned int nr, int full)
{
	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
	}
}

#define get_and_clear_full_ptes get_and_clear_full_ptes
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    unsigned int nr, int full)
{
	pte_t pte;

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	}

	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	return __ptep_get_and_clear(mm, addr, ptep);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_test_and_clear_young(vma, addr, ptep);

	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_ptep_clear_flush_young(vma, addr, ptep);
}
#define wrprotect_ptes wrprotect_ptes
static __always_inline void wrprotect_ptes(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep, unsigned int nr)
{
	if (likely(nr == 1)) {
		/*
		 * Optimization: wrprotect_ptes() can only be called for present
		 * ptes so we only need to check contig bit as condition for
		 * unfold, and we can remove the contig bit from the pte we read
		 * to avoid re-reading. This speeds up fork() which is sensitive
		 * for order-0 folios. Equivalent to contpte_try_unfold().
		 */
		pte_t orig_pte = __ptep_get(ptep);

		if (unlikely(pte_cont(orig_pte))) {
			__contpte_try_unfold(mm, addr, ptep, orig_pte);
			orig_pte = pte_mknoncont(orig_pte);
		}
		___ptep_set_wrprotect(mm, addr, ptep, orig_pte);
	} else {
		contpte_wrprotect_ptes(mm, addr, ptep, nr);
	}
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	wrprotect_ptes(mm, addr, ptep, 1);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	pte_t orig_pte = __ptep_get(ptep);

	entry = pte_mknoncont(entry);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_set_access_flags(vma, addr, ptep, entry, dirty);

	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
}

#define clear_young_dirty_ptes clear_young_dirty_ptes
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr, cydp_t flags)
{
	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
		__clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
	else
		contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
}

#else /* CONFIG_ARM64_CONTPTE */

#define ptep_get				__ptep_get
#define set_pte					__set_pte
#define set_ptes				__set_ptes
#define pte_clear				__pte_clear
#define clear_full_ptes				__clear_full_ptes
#define get_and_clear_full_ptes			__get_and_clear_full_ptes
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear			__ptep_get_and_clear
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young		__ptep_test_and_clear_young
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young			__ptep_clear_flush_young
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect			__ptep_set_wrprotect
#define wrprotect_ptes				__wrprotect_ptes
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags			__ptep_set_access_flags
#define clear_young_dirty_ptes			__clear_young_dirty_ptes

#endif /* CONFIG_ARM64_CONTPTE */

#endif /* !__ASSEMBLER__ */

#endif /* __ASM_PGTABLE_H */