/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <asm/por.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Macros to convert between a physical address and its placement in a
 * page table entry, taking care of 52-bit addresses.
 */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
#define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_LOW)
#define __phys_to_pte_val(phys)	(phys)
#endif
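
/*
 * Illustrative sketch (not part of the kernel source, not built): with 52-bit
 * PAs the high address bits live in a separate pte field, so converting a pfn
 * into a pte and back with pfn_pte()/pte_pfn() (which build on the helpers
 * above) should be lossless for any pfn within the supported PA range. The
 * helper name below is hypothetical.
 */
#if 0
static inline bool pte_pfn_round_trips(unsigned long pfn)
{
	/* PAGE_KERNEL only contributes attribute bits, not address bits. */
	return pte_pfn(pfn_pte(pfn, PAGE_KERNEL)) == pfn;
}
#endif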

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define __pte_clear(mm, addr, ptep) \
				__set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_valid(pte) || pte_present_invalid(pte))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_present_invalid(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID)
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Returns true if the pte is valid and has the contiguous bit set.
 */
#define pte_valid_cont(pte)	(pte_valid(pte) && pte_cont(pte))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute)
{
	u64 por;

	if (!system_supports_poe())
		return true;

	por = read_sysreg_s(SYS_POR_EL0);

	if (write)
		return por_elx_allows_write(por, pkey);

	if (execute)
		return por_elx_allows_exec(por, pkey);

	return por_elx_allows_read(por, pkey);
}

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER
 * bit set, subject to the write permission check). Execute-only
 * mappings, like PROT_EXEC with EPAN (both PTE_USER and PTE_UXN bits
 * not set), must return false. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted_no_overlay(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pte_access_permitted(pte, write) \
	(pte_access_permitted_no_overlay(pte, write) && \
	por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkvalid(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pte_t pte_mkinvalid(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
	pte = clear_pte_bit(pte, __pgprot(PTE_VALID));
	return pte;
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP)));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void __set_pte(pte_t *ptep, pte_t pte)
{
	__set_pte_nosync(ptep, pte);

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

static inline pte_t __ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(pteval_t old, pteval_t new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |     1          0          0
 *   0      1      |     1          1          0
 *   1      0      |     1          0          1
 *   1      1      |     0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
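
/*
 * Illustrative sketch (not part of the kernel source, not built): the table
 * above implies that write-protecting a pte must not lose a hardware-set
 * dirty state. pte_wrprotect() folds "hw dirty" (writable && !rdonly) into
 * the software PTE_DIRTY bit, so pte_dirty() still reports the page as dirty
 * afterwards. The helper name below is hypothetical.
 */
#if 0
static inline bool wrprotect_keeps_dirty(pte_t pte)
{
	bool dirty_before = pte_dirty(pte);

	pte = pte_wrprotect(pte);	/* clears PTE_WRITE, sets PTE_RDONLY */

	return !dirty_before || pte_dirty(pte);
}
#endif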

static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = __ptep_get(ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (__ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted_no_overlay() returns false for exec only
	 * mappings, they don't expose tags (instruction fetches don't check
	 * tags).
	 */
	if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

#define pte_advance_pfn pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}

static inline void __set_ptes(struct mm_struct *mm,
			      unsigned long __always_unused addr,
			      pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);
	__sync_cache_and_tags(pte, nr);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		__set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_advance_pfn(pte, 1);
	}
}

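/*
 * Illustrative sketch (not part of the kernel source, not built): __set_ptes()
 * writes 'nr' consecutive entries starting from 'pte', advancing the pfn by
 * one page per entry while keeping the protection bits, which is how a
 * physically contiguous folio is mapped in a single call. The values and the
 * helper name below are hypothetical.
 */
#if 0
static inline void example_map_four_pages(struct mm_struct *mm, pte_t *ptep,
					  unsigned long pfn)
{
	/* Maps pfn, pfn + 1, pfn + 2 and pfn + 3 at ptep[0..3]. */
	__set_ptes(mm, 0, ptep, pfn_pte(pfn, PAGE_READONLY), 4);
}
#endif
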
/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	/*
	 * pte_present_invalid() tells us that the pte is invalid from HW
	 * perspective but present from SW perspective, so the fields are to be
	 * interpreted as per the HW layout. The second and third checks are
	 * the unique encoding that we use for PROT_NONE. It is insufficient to
	 * only use the first check because we share the same encoding scheme
	 * with pmds which support pmd_mkinvalid(), so can be present-invalid
	 * without being PROT_NONE.
	 */
	return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pmd_uffd_wp(pmd)	pte_uffd_wp(pmd_pte(pmd))
#define pmd_mkuffd_wp(pmd)	pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)))
#define pmd_clear_uffd_wp(pmd)	pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)))
#define pmd_swp_uffd_wp(pmd)	pte_swp_uffd_wp(pmd_pte(pmd))
#define pmd_swp_mkuffd_wp(pmd)	pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)))
#define pmd_swp_clear_uffd_wp(pmd) \
				pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd)		pte_devmap(pmd_pte(pmd))
#endif
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
#define pmd_special(pte)	(!!((pmd_val(pte) & PTE_SPECIAL)))
static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
}
#endif

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

#define pud_mkhuge(pud)		(__pud(pud_val(pud) & ~PUD_TABLE_BIT))

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
#define pud_special(pte)	pte_special(pud_pte(pud))
#define pud_mkspecial(pte)	pte_pud(pte_mkspecial(pud_pte(pud)))
#endif

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	unsigned long pfn = pud_pfn(pud);

	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
}

static inline void __set_pte_at(struct mm_struct *mm,
				unsigned long __always_unused addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	__sync_cache_and_tags(pte, nr);
	__check_safe_pte_update(mm, ptep, pte);
	__set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
			    PMD_SIZE >> PAGE_SHIFT);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
			    PUD_SIZE >> PAGE_SHIFT);
}

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

#define pgprot_decrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, PROT_NS_SHARED)
#define pgprot_encrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, 0)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from the
 * endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

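/*
 * Illustrative sketch (not part of the kernel source, not built): the
 * pgprot_*() modifiers above only swap the MAIR attribute index (and force
 * PXN/UXN), leaving the other protection bits alone. A driver exposing a
 * framebuffer-style region as write-combining might do something roughly
 * like this in its mmap() handler; the function name is hypothetical and
 * remap_pfn_range() is declared in <linux/mm.h>, not here.
 */
#if 0
static int example_mmap_wc(struct vm_area_struct *vma, unsigned long pfn)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
#endif
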
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t init_pg_dir[];
extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd)) {
		dsb(ishst);
		isb();
	}
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!pud_table(pud))
#define pud_present(pud)	pte_present(pud_pte(pud))
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#else
#define pud_leaf(pud)		false
#endif
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline bool pgtable_l4_enabled(void);

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud)) {
		dsb(ishst);
		isb();
	}
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_valid(pud)		false
#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static __always_inline bool pgtable_l4_enabled(void)
{
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return true;
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_pud_folded(const struct mm_struct *mm)
{
	return !pgtable_l4_enabled();
}
#define mm_pud_folded mm_pud_folded

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)		(pgtable_l4_enabled() && !(p4d_val(p4d) & P4D_TABLE_BIT))
#define p4d_present(p4d)	(!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	dsb(ishst);
	isb();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	if (pgtable_l4_enabled())
		set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
{
	/* Ensure that 'p4dp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> P4D_SHIFT) ^ ((u64)p4dp >> 3)) % PTRS_PER_P4D);

	return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
{
	BUG_ON(!pgtable_l4_enabled());

	return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
}

static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
{
	return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
}
#define pud_offset pud_offset

static inline pud_t *pud_set_fixmap(unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return NULL;
	return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
}

static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return pud_set_fixmap(pud_offset_phys(p4dp, addr));
}

static inline void pud_clear_fixmap(void)
{
	if (pgtable_l4_enabled())
		clear_fixmap(FIX_PUD);
}

/* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
}

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

#else

static inline bool pgtable_l4_enabled(void) { return false; }

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && !(pgd_val(pgd) & PGD_TABLE_BIT))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	dsb(ishst);
	isb();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	/* Ensure that 'pgdp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> PGDIR_SHIFT) ^ ((u64)pgdp >> 3)) % PTRS_PER_PGD);

	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}

static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}

static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}

/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

#else

static inline bool pgtable_l5_enabled(void) { return false; }

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

/* Match p4d_offset folding in <asm-generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)		NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

static inline
p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	/*
	 * With runtime folding of the pud, pud_offset_lockless() passes
	 * the 'pgd_t *' we return here to p4d_to_folded_pud(), which
	 * will offset the pointer assuming that it points into
	 * a page-table page. However, the fast GUP path passes us a
	 * pgd_t allocated on the stack and so we must use the original
	 * pointer in 'pgdp' to construct the p4d pointer instead of
	 * using the generic p4d_offset_lockless() implementation.
	 *
	 * Note: reusing the original pointer means that we may
	 * dereference the same (live) page-table entry multiple times.
	 * This is safe because it is still only loaded once in the
	 * context of each level and the CPU guarantees same-address
	 * read-after-read ordering.
	 */
	return p4d_offset(pgdp, addr);
}
#define p4d_offset_lockless p4d_offset_lockless_folded

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE |
			      PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK;

	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	/*
	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
	 * dirtiness again.
	 */
	if (pte_sw_dirty(pte))
		pte = pte_mkdirty(pte);
	return pte;
}
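
/*
 * Illustrative sketch (not part of the kernel source, not built): pte_modify()
 * only replaces the bits in 'mask' above and first folds any hardware dirty
 * state into PTE_DIRTY, so an mprotect()-style permission change cannot
 * silently lose the fact that a page was written. The helper name below is
 * hypothetical.
 */
#if 0
static inline bool prot_change_keeps_dirty(pte_t pte, pgprot_t newprot)
{
	return !pte_dirty(pte) || pte_dirty(pte_modify(pte, newprot));
}
#endif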

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

extern int __ptep_set_access_flags(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep,
				   pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
				       pmd_pte(entry), dirty);
}

static inline int pud_devmap(pud_t pud)
{
	return 0;
}

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = __ptep_get(ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
					   unsigned long address, pte_t *ptep)
{
	int young = __ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	/* Operation applies to PMD table entry only if FEAT_HAFT is enabled */
	VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft());
	return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */

static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		__ptep_get_and_clear(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep,
					      unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = __ptep_get_and_clear(mm, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep,
					 pte_t pte)
{
	pte_t old_pte;

	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

/*
 * __ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
}

static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
				    pte_t *ptep, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
		__ptep_set_wrprotect(mm, address, ptep);
}

static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t pte, cydp_t flags)
{
	pte_t old_pte;

	do {
		old_pte = pte;

		if (flags & CYDP_CLEAR_YOUNG)
			pte = pte_mkold(pte);
		if (flags & CYDP_CLEAR_DIRTY)
			pte = pte_mkclean(pte);

		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep,
					    unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (;;) {
		pte = __ptep_get(ptep);

		if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
			__set_pte(ptep, pte_mkclean(pte_mkold(pte)));
		else
			__clear_young_dirty_pte(vma, addr, ptep, pte, flags);

		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	__ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit  2:		remember PG_anon_exclusive
 *	bit  3:		remember uffd-wp state
 *	bits 6-10:	swap type
 *	bit  11:	PTE_PRESENT_INVALID (must be zero)
 *	bits 12-61:	swap offset
 */
#define __SWP_TYPE_SHIFT	6
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_OFFSET_BITS	50
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp)		__pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

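/*
 * Illustrative sketch (not part of the kernel source, not built): with the
 * layout above, the swap type and offset land in disjoint bit ranges, so
 * packing a (type, offset) pair into a swap pte and taking it apart again is
 * lossless as long as the values fit in 5 and 50 bits respectively. The
 * helper name below is hypothetical.
 */
#if 0
static inline bool swp_encoding_round_trips(unsigned long type,
					    unsigned long offset)
{
	swp_entry_t entry = __swp_entry(type & __SWP_TYPE_MASK,
					offset & __SWP_OFFSET_MASK);
	pte_t pte = __swp_entry_to_pte(entry);

	return __swp_type(__pte_to_swp_entry(pte)) == (type & __SWP_TYPE_MASK) &&
	       __swp_offset(__pte_to_swp_entry(pte)) == (offset & __SWP_OFFSET_MASK);
}
#endif
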
/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
extern int arch_prepare_to_swap(struct folio *folio);

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#define __HAVE_ARCH_SWAP_RESTORE
extern void arch_swap_restore(swp_entry_t entry, struct folio *folio);

#endif /* CONFIG_ARM64_MTE */

/*
 * On AArch64, the cache coherency is handled via the __set_ptes() function.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
		unsigned int nr)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#ifdef CONFIG_ARM64_PA_BITS_52
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

/*
 * On arm64 without hardware Access Flag, copying from user will fail because
 * the pte is old and cannot be marked young. So we always end up with zeroed
 * page after fork() + CoW for pfn mappings. We don't always have a
 * hardware-managed access flag on arm64.
 */
#define arch_has_hw_pte_young		cpu_has_hw_af

#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
#define arch_has_hw_nonleaf_pmd_young	system_supports_haft
#endif

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
#define arch_wants_old_prefaulted_pte	cpu_has_hw_af

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}


#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);

#ifdef CONFIG_ARM64_CONTPTE

/*
 * The contpte APIs are used to transparently manage the contiguous bit in ptes
 * where it is possible and makes sense to do so. The PTE_CONT bit is considered
 * a private implementation detail of the public ptep API (see below).
 */
extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr);
extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full);
extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full);
extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr);
extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty);
extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, cydp_t flags);

static __always_inline void contpte_try_fold(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	/*
	 * Only bother trying if both the virtual and physical addresses are
	 * aligned and correspond to the last entry in a contig range. The core
	 * code mostly modifies ranges from low to high, so this is likely to
	 * be the last modification in the contig range, so a good time to
	 * fold. We can't fold special mappings, because there is no associated
	 * folio.
	 */

	const unsigned long contmask = CONT_PTES - 1;
	bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;

	if (unlikely(valign)) {
		bool palign = (pte_pfn(pte) & contmask) == contmask;

		if (unlikely(palign &&
		    pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
			__contpte_try_fold(mm, addr, ptep, pte);
	}
}

static __always_inline void contpte_try_unfold(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	if (unlikely(pte_valid_cont(pte)))
		__contpte_try_unfold(mm, addr, ptep, pte);
}

#define pte_batch_hint pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	if (!pte_valid_cont(pte))
		return 1;

	return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
}

/*
 * The below functions constitute the public API that arm64 presents to the
 * core-mm to manipulate PTE entries within their page tables (or at least this
 * is the subset of the API that arm64 needs to implement). These public
 * versions will automatically and transparently apply the contiguous bit where
 * it makes sense to do so. Therefore any users that are contig-aware (e.g.
 * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
 * private versions, which are prefixed with double underscore. All of these
 * APIs except for ptep_get_lockless() are expected to be called with the PTL
 * held. Although the contiguous bit is considered private to the
 * implementation, it is deliberately allowed to leak through the getters (e.g.
 * ptep_get()), back to core code. This is required so that pte_leaf_size() can
 * provide an accurate size for perf_get_pgtable_size(). But this leakage means
 * it's possible a pte will be passed to a setter with the contiguous bit set,
 * so we explicitly clear the contiguous bit in those cases to prevent
 * accidentally setting it in the pgtable.
 */
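
/*
 * Illustrative sketch (not part of the kernel source, not built): core-mm
 * code always goes through the public helpers below and the contiguous bit
 * is folded or unfolded behind the scenes. A typical batched update under
 * the PTL looks roughly like this; the function name and locals are
 * hypothetical and owned by the caller.
 */
#if 0
static void example_remap_batch(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	/* Caller holds the PTL covering ptep[0..nr-1]. */
	set_ptes(mm, addr, ptep, pte, nr);	/* may fold/unfold PTE_CONT */
}
#endif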

#define ptep_get ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get(ptep, pte);
}

#define ptep_get_lockless ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get_lockless(ptep);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * We don't have the mm or vaddr so cannot unfold contig entries (since
	 * it requires tlb maintenance). set_pte() is not used in core code, so
	 * this should never even be called. Regardless do our best to service
	 * any call and emit a warning if there is any attempt to set a pte on
	 * top of an existing contig range.
	 */
	pte_t orig_pte = __ptep_get(ptep);

	WARN_ON_ONCE(pte_valid_cont(orig_pte));
	__set_pte(ptep, pte_mknoncont(pte));
}

#define set_ptes set_ptes
static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	pte = pte_mknoncont(pte);

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__set_ptes(mm, addr, ptep, pte, 1);
		contpte_try_fold(mm, addr, ptep, pte);
	} else {
		contpte_set_ptes(mm, addr, ptep, pte, nr);
	}
}

static inline void pte_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	__pte_clear(mm, addr, ptep);
}

#define clear_full_ptes clear_full_ptes
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full)
{
	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
	}
}

#define get_and_clear_full_ptes get_and_clear_full_ptes
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full)
{
	pte_t pte;

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	}

	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	return __ptep_get_and_clear(mm, addr, ptep);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_test_and_clear_young(vma, addr, ptep);

	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_ptep_clear_flush_young(vma, addr, ptep);
}

#define wrprotect_ptes wrprotect_ptes
static __always_inline void wrprotect_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, unsigned int nr)
{
	if (likely(nr == 1)) {
		/*
		 * Optimization: wrprotect_ptes() can only be called for present
		 * ptes so we only need to check contig bit as condition for
		 * unfold, and we can remove the contig bit from the pte we read
		 * to avoid re-reading. This speeds up fork(), which is
		 * sensitive to this for order-0 folios. Equivalent to
		 * contpte_try_unfold().
		 */
		pte_t orig_pte = __ptep_get(ptep);

		if (unlikely(pte_cont(orig_pte))) {
			__contpte_try_unfold(mm, addr, ptep, orig_pte);
			orig_pte = pte_mknoncont(orig_pte);
		}
		___ptep_set_wrprotect(mm, addr, ptep, orig_pte);
	} else {
		contpte_wrprotect_ptes(mm, addr, ptep, nr);
	}
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	wrprotect_ptes(mm, addr, ptep, 1);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty)
{
	pte_t orig_pte = __ptep_get(ptep);

	entry = pte_mknoncont(entry);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_set_access_flags(vma, addr, ptep, entry, dirty);

	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
}

#define clear_young_dirty_ptes clear_young_dirty_ptes
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr, cydp_t flags)
{
	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
		__clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
	else
		contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
}

#else /* CONFIG_ARM64_CONTPTE */

#define ptep_get				__ptep_get
#define set_pte					__set_pte
#define set_ptes				__set_ptes
#define pte_clear				__pte_clear
#define clear_full_ptes				__clear_full_ptes
#define get_and_clear_full_ptes			__get_and_clear_full_ptes
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear			__ptep_get_and_clear
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young		__ptep_test_and_clear_young
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young			__ptep_clear_flush_young
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect			__ptep_set_wrprotect
#define wrprotect_ptes				__wrprotect_ptes
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags			__ptep_set_access_flags
#define clear_young_dirty_ptes			__clear_young_dirty_ptes

#endif /* CONFIG_ARM64_CONTPTE */
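
/*
 * Illustrative sketch (not part of the kernel source, not built): when
 * CONFIG_ARM64_CONTPTE is enabled, pte_batch_hint() lets a scan skip ahead
 * over the remainder of a contiguous block instead of visiting every entry.
 * A walk over 'nr' entries could use it like this; the helper name is
 * hypothetical.
 */
#if 0
static unsigned int example_count_young(pte_t *ptep, unsigned int nr)
{
	unsigned int i = 0, young = 0;

	while (i < nr) {
		pte_t pte = ptep_get(ptep + i);
		/* 1 for non-contig ptes, up to CONT_PTES within a block */
		unsigned int batch = pte_batch_hint(ptep + i, pte);

		if (batch > nr - i)
			batch = nr - i;
		if (pte_young(pte))
			young += batch;
		i += batch;
	}
	return young;
}
#endif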

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */