at v6.19-rc4 62 kB view raw
1/* SPDX-License-Identifier: GPL-2.0 */ 2#ifndef _LINUX_PGTABLE_H 3#define _LINUX_PGTABLE_H 4 5#include <linux/pfn.h> 6#include <asm/pgtable.h> 7 8#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT) 9#define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT) 10 11#ifndef __ASSEMBLY__ 12#ifdef CONFIG_MMU 13 14#include <linux/mm_types.h> 15#include <linux/bug.h> 16#include <linux/errno.h> 17#include <asm-generic/pgtable_uffd.h> 18#include <linux/page_table_check.h> 19 20#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ 21 defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS 22#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED 23#endif 24 25/* 26 * On almost all architectures and configurations, 0 can be used as the 27 * upper ceiling to free_pgtables(): on many architectures it has the same 28 * effect as using TASK_SIZE. However, there is one configuration which 29 * must impose a more careful limit, to avoid freeing kernel pgtables. 30 */ 31#ifndef USER_PGTABLES_CEILING 32#define USER_PGTABLES_CEILING 0UL 33#endif 34 35/* 36 * This defines the first usable user address. Platforms 37 * can override its value with custom FIRST_USER_ADDRESS 38 * defined in their respective <asm/pgtable.h>. 39 */ 40#ifndef FIRST_USER_ADDRESS 41#define FIRST_USER_ADDRESS 0UL 42#endif 43 44/* 45 * This defines the generic helper for accessing PMD page 46 * table page. Although platforms can still override this 47 * via their respective <asm/pgtable.h>. 48 */ 49#ifndef pmd_pgtable 50#define pmd_pgtable(pmd) pmd_page(pmd) 51#endif 52 53#define pmd_folio(pmd) page_folio(pmd_page(pmd)) 54 55/* 56 * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD] 57 * 58 * The pXx_index() functions return the index of the entry in the page 59 * table page which would control the given virtual address 60 * 61 * As these functions may be used by the same code for different levels of 62 * the page table folding, they are always available, regardless of 63 * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0 64 * because in such cases PTRS_PER_PxD equals 1. 65 */ 66 67static inline unsigned long pte_index(unsigned long address) 68{ 69 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); 70} 71 72#ifndef pmd_index 73static inline unsigned long pmd_index(unsigned long address) 74{ 75 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); 76} 77#define pmd_index pmd_index 78#endif 79 80#ifndef pud_index 81static inline unsigned long pud_index(unsigned long address) 82{ 83 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); 84} 85#define pud_index pud_index 86#endif 87 88#ifndef pgd_index 89/* Must be a compile-time constant, so implement it as a macro */ 90#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) 91#endif 92 93#ifndef kernel_pte_init 94static inline void kernel_pte_init(void *addr) 95{ 96} 97#define kernel_pte_init kernel_pte_init 98#endif 99 100#ifndef pmd_init 101static inline void pmd_init(void *addr) 102{ 103} 104#define pmd_init pmd_init 105#endif 106 107#ifndef pud_init 108static inline void pud_init(void *addr) 109{ 110} 111#define pud_init pud_init 112#endif 113 114#ifndef pte_offset_kernel 115static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) 116{ 117 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); 118} 119#define pte_offset_kernel pte_offset_kernel 120#endif 121 122#ifdef CONFIG_HIGHPTE 123#define __pte_map(pmd, address) \ 124 ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address))) 125#define pte_unmap(pte) do { \ 126 kunmap_local((pte)); \ 127 rcu_read_unlock(); \ 128} while (0) 129#else 130static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address) 131{ 132 return pte_offset_kernel(pmd, address); 133} 134static inline void pte_unmap(pte_t *pte) 135{ 136 rcu_read_unlock(); 137} 138#endif 139 140void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable); 141 142/* Find an entry in the second-level page table.. */ 143#ifndef pmd_offset 144static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) 145{ 146 return pud_pgtable(*pud) + pmd_index(address); 147} 148#define pmd_offset pmd_offset 149#endif 150 151#ifndef pud_offset 152static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) 153{ 154 return p4d_pgtable(*p4d) + pud_index(address); 155} 156#define pud_offset pud_offset 157#endif 158 159static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) 160{ 161 return (pgd + pgd_index(address)); 162}; 163 164/* 165 * a shortcut to get a pgd_t in a given mm 166 */ 167#ifndef pgd_offset 168#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) 169#endif 170 171/* 172 * a shortcut which implies the use of the kernel's pgd, instead 173 * of a process's 174 */ 175#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) 176 177/* 178 * In many cases it is known that a virtual address is mapped at PMD or PTE 179 * level, so instead of traversing all the page table levels, we can get a 180 * pointer to the PMD entry in user or kernel page table or translate a virtual 181 * address to the pointer in the PTE in the kernel page tables with simple 182 * helpers. 183 */ 184static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va) 185{ 186 return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va); 187} 188 189static inline pmd_t *pmd_off_k(unsigned long va) 190{ 191 return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va); 192} 193 194static inline pte_t *virt_to_kpte(unsigned long vaddr) 195{ 196 pmd_t *pmd = pmd_off_k(vaddr); 197 198 return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr); 199} 200 201#ifndef pmd_young 202static inline int pmd_young(pmd_t pmd) 203{ 204 return 0; 205} 206#endif 207 208#ifndef pmd_dirty 209static inline int pmd_dirty(pmd_t pmd) 210{ 211 return 0; 212} 213#endif 214 215/* 216 * A facility to provide lazy MMU batching. This allows PTE updates and 217 * page invalidations to be delayed until a call to leave lazy MMU mode 218 * is issued. Some architectures may benefit from doing this, and it is 219 * beneficial for both shadow and direct mode hypervisors, which may batch 220 * the PTE updates which happen during this window. Note that using this 221 * interface requires that read hazards be removed from the code. A read 222 * hazard could result in the direct mode hypervisor case, since the actual 223 * write to the page tables may not yet have taken place, so reads though 224 * a raw PTE pointer after it has been modified are not guaranteed to be 225 * up to date. 226 * 227 * In the general case, no lock is guaranteed to be held between entry and exit 228 * of the lazy mode. So the implementation must assume preemption may be enabled 229 * and cpu migration is possible; it must take steps to be robust against this. 230 * (In practice, for user PTE updates, the appropriate page table lock(s) are 231 * held, but for kernel PTE updates, no lock is held). Nesting is not permitted 232 * and the mode cannot be used in interrupt context. 233 */ 234#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE 235static inline void arch_enter_lazy_mmu_mode(void) {} 236static inline void arch_leave_lazy_mmu_mode(void) {} 237static inline void arch_flush_lazy_mmu_mode(void) {} 238#endif 239 240#ifndef pte_batch_hint 241/** 242 * pte_batch_hint - Number of pages that can be added to batch without scanning. 243 * @ptep: Page table pointer for the entry. 244 * @pte: Page table entry. 245 * 246 * Some architectures know that a set of contiguous ptes all map the same 247 * contiguous memory with the same permissions. In this case, it can provide a 248 * hint to aid pte batching without the core code needing to scan every pte. 249 * 250 * An architecture implementation may ignore the PTE accessed state. Further, 251 * the dirty state must apply atomically to all the PTEs described by the hint. 252 * 253 * May be overridden by the architecture, else pte_batch_hint is always 1. 254 */ 255static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte) 256{ 257 return 1; 258} 259#endif 260 261#ifndef pte_advance_pfn 262static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) 263{ 264 return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT)); 265} 266#endif 267 268#define pte_next_pfn(pte) pte_advance_pfn(pte, 1) 269 270#ifndef set_ptes 271/** 272 * set_ptes - Map consecutive pages to a contiguous range of addresses. 273 * @mm: Address space to map the pages into. 274 * @addr: Address to map the first page at. 275 * @ptep: Page table pointer for the first entry. 276 * @pte: Page table entry for the first page. 277 * @nr: Number of pages to map. 278 * 279 * When nr==1, initial state of pte may be present or not present, and new state 280 * may be present or not present. When nr>1, initial state of all ptes must be 281 * not present, and new state must be present. 282 * 283 * May be overridden by the architecture, or the architecture can define 284 * set_pte() and PFN_PTE_SHIFT. 285 * 286 * Context: The caller holds the page table lock. The pages all belong 287 * to the same folio. The PTEs are all in the same PMD. 288 */ 289static inline void set_ptes(struct mm_struct *mm, unsigned long addr, 290 pte_t *ptep, pte_t pte, unsigned int nr) 291{ 292 page_table_check_ptes_set(mm, ptep, pte, nr); 293 294 for (;;) { 295 set_pte(ptep, pte); 296 if (--nr == 0) 297 break; 298 ptep++; 299 pte = pte_next_pfn(pte); 300 } 301} 302#endif 303#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) 304 305#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 306extern int ptep_set_access_flags(struct vm_area_struct *vma, 307 unsigned long address, pte_t *ptep, 308 pte_t entry, int dirty); 309#endif 310 311#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 312#ifdef CONFIG_TRANSPARENT_HUGEPAGE 313extern int pmdp_set_access_flags(struct vm_area_struct *vma, 314 unsigned long address, pmd_t *pmdp, 315 pmd_t entry, int dirty); 316extern int pudp_set_access_flags(struct vm_area_struct *vma, 317 unsigned long address, pud_t *pudp, 318 pud_t entry, int dirty); 319#else 320static inline int pmdp_set_access_flags(struct vm_area_struct *vma, 321 unsigned long address, pmd_t *pmdp, 322 pmd_t entry, int dirty) 323{ 324 BUILD_BUG(); 325 return 0; 326} 327static inline int pudp_set_access_flags(struct vm_area_struct *vma, 328 unsigned long address, pud_t *pudp, 329 pud_t entry, int dirty) 330{ 331 BUILD_BUG(); 332 return 0; 333} 334#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 335#endif 336 337#ifndef ptep_get 338static inline pte_t ptep_get(pte_t *ptep) 339{ 340 return READ_ONCE(*ptep); 341} 342#endif 343 344#ifndef pmdp_get 345static inline pmd_t pmdp_get(pmd_t *pmdp) 346{ 347 return READ_ONCE(*pmdp); 348} 349#endif 350 351#ifndef pudp_get 352static inline pud_t pudp_get(pud_t *pudp) 353{ 354 return READ_ONCE(*pudp); 355} 356#endif 357 358#ifndef p4dp_get 359static inline p4d_t p4dp_get(p4d_t *p4dp) 360{ 361 return READ_ONCE(*p4dp); 362} 363#endif 364 365#ifndef pgdp_get 366static inline pgd_t pgdp_get(pgd_t *pgdp) 367{ 368 return READ_ONCE(*pgdp); 369} 370#endif 371 372#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 373static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 374 unsigned long address, 375 pte_t *ptep) 376{ 377 pte_t pte = ptep_get(ptep); 378 int r = 1; 379 if (!pte_young(pte)) 380 r = 0; 381 else 382 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); 383 return r; 384} 385#endif 386 387#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 388#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) 389static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 390 unsigned long address, 391 pmd_t *pmdp) 392{ 393 pmd_t pmd = *pmdp; 394 int r = 1; 395 if (!pmd_young(pmd)) 396 r = 0; 397 else 398 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); 399 return r; 400} 401#else 402static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 403 unsigned long address, 404 pmd_t *pmdp) 405{ 406 BUILD_BUG(); 407 return 0; 408} 409#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */ 410#endif 411 412#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 413int ptep_clear_flush_young(struct vm_area_struct *vma, 414 unsigned long address, pte_t *ptep); 415#endif 416 417#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH 418#ifdef CONFIG_TRANSPARENT_HUGEPAGE 419extern int pmdp_clear_flush_young(struct vm_area_struct *vma, 420 unsigned long address, pmd_t *pmdp); 421#else 422/* 423 * Despite relevant to THP only, this API is called from generic rmap code 424 * under PageTransHuge(), hence needs a dummy implementation for !THP 425 */ 426static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, 427 unsigned long address, pmd_t *pmdp) 428{ 429 BUILD_BUG(); 430 return 0; 431} 432#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 433#endif 434 435#ifndef arch_has_hw_nonleaf_pmd_young 436/* 437 * Return whether the accessed bit in non-leaf PMD entries is supported on the 438 * local CPU. 439 */ 440static inline bool arch_has_hw_nonleaf_pmd_young(void) 441{ 442 return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG); 443} 444#endif 445 446#ifndef arch_has_hw_pte_young 447/* 448 * Return whether the accessed bit is supported on the local CPU. 449 * 450 * This stub assumes accessing through an old PTE triggers a page fault. 451 * Architectures that automatically set the access bit should overwrite it. 452 */ 453static inline bool arch_has_hw_pte_young(void) 454{ 455 return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG); 456} 457#endif 458 459#ifndef exec_folio_order 460/* 461 * Returns preferred minimum folio order for executable file-backed memory. Must 462 * be in range [0, PMD_ORDER). Default to order-0. 463 */ 464static inline unsigned int exec_folio_order(void) 465{ 466 return 0; 467} 468#endif 469 470#ifndef arch_check_zapped_pte 471static inline void arch_check_zapped_pte(struct vm_area_struct *vma, 472 pte_t pte) 473{ 474} 475#endif 476 477#ifndef arch_check_zapped_pmd 478static inline void arch_check_zapped_pmd(struct vm_area_struct *vma, 479 pmd_t pmd) 480{ 481} 482#endif 483 484#ifndef arch_check_zapped_pud 485static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud) 486{ 487} 488#endif 489 490#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR 491static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 492 unsigned long address, 493 pte_t *ptep) 494{ 495 pte_t pte = ptep_get(ptep); 496 pte_clear(mm, address, ptep); 497 page_table_check_pte_clear(mm, pte); 498 return pte; 499} 500#endif 501 502#ifndef clear_young_dirty_ptes 503/** 504 * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the 505 * same folio as old/clean. 506 * @mm: Address space the pages are mapped into. 507 * @addr: Address the first page is mapped at. 508 * @ptep: Page table pointer for the first entry. 509 * @nr: Number of entries to mark old/clean. 510 * @flags: Flags to modify the PTE batch semantics. 511 * 512 * May be overridden by the architecture; otherwise, implemented by 513 * get_and_clear/modify/set for each pte in the range. 514 * 515 * Note that PTE bits in the PTE range besides the PFN can differ. For example, 516 * some PTEs might be write-protected. 517 * 518 * Context: The caller holds the page table lock. The PTEs map consecutive 519 * pages that belong to the same folio. The PTEs are all in the same PMD. 520 */ 521static inline void clear_young_dirty_ptes(struct vm_area_struct *vma, 522 unsigned long addr, pte_t *ptep, 523 unsigned int nr, cydp_t flags) 524{ 525 pte_t pte; 526 527 for (;;) { 528 if (flags == CYDP_CLEAR_YOUNG) 529 ptep_test_and_clear_young(vma, addr, ptep); 530 else { 531 pte = ptep_get_and_clear(vma->vm_mm, addr, ptep); 532 if (flags & CYDP_CLEAR_YOUNG) 533 pte = pte_mkold(pte); 534 if (flags & CYDP_CLEAR_DIRTY) 535 pte = pte_mkclean(pte); 536 set_pte_at(vma->vm_mm, addr, ptep, pte); 537 } 538 if (--nr == 0) 539 break; 540 ptep++; 541 addr += PAGE_SIZE; 542 } 543} 544#endif 545 546static inline void ptep_clear(struct mm_struct *mm, unsigned long addr, 547 pte_t *ptep) 548{ 549 pte_t pte = ptep_get(ptep); 550 551 pte_clear(mm, addr, ptep); 552 /* 553 * No need for ptep_get_and_clear(): page table check doesn't care about 554 * any bits that could have been set by HW concurrently. 555 */ 556 page_table_check_pte_clear(mm, pte); 557} 558 559#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH 560/* 561 * For walking the pagetables without holding any locks. Some architectures 562 * (eg x86-32 PAE) cannot load the entries atomically without using expensive 563 * instructions. We are guaranteed that a PTE will only either go from not 564 * present to present, or present to not present -- it will not switch to a 565 * completely different present page without a TLB flush inbetween; which we 566 * are blocking by holding interrupts off. 567 * 568 * Setting ptes from not present to present goes: 569 * 570 * ptep->pte_high = h; 571 * smp_wmb(); 572 * ptep->pte_low = l; 573 * 574 * And present to not present goes: 575 * 576 * ptep->pte_low = 0; 577 * smp_wmb(); 578 * ptep->pte_high = 0; 579 * 580 * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'. 581 * We load pte_high *after* loading pte_low, which ensures we don't see an older 582 * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't 583 * picked up a changed pte high. We might have gotten rubbish values from 584 * pte_low and pte_high, but we are guaranteed that pte_low will not have the 585 * present bit set *unless* it is 'l'. Because get_user_pages_fast() only 586 * operates on present ptes we're safe. 587 */ 588static inline pte_t ptep_get_lockless(pte_t *ptep) 589{ 590 pte_t pte; 591 592 do { 593 pte.pte_low = ptep->pte_low; 594 smp_rmb(); 595 pte.pte_high = ptep->pte_high; 596 smp_rmb(); 597 } while (unlikely(pte.pte_low != ptep->pte_low)); 598 599 return pte; 600} 601#define ptep_get_lockless ptep_get_lockless 602 603#if CONFIG_PGTABLE_LEVELS > 2 604static inline pmd_t pmdp_get_lockless(pmd_t *pmdp) 605{ 606 pmd_t pmd; 607 608 do { 609 pmd.pmd_low = pmdp->pmd_low; 610 smp_rmb(); 611 pmd.pmd_high = pmdp->pmd_high; 612 smp_rmb(); 613 } while (unlikely(pmd.pmd_low != pmdp->pmd_low)); 614 615 return pmd; 616} 617#define pmdp_get_lockless pmdp_get_lockless 618#define pmdp_get_lockless_sync() tlb_remove_table_sync_one() 619#endif /* CONFIG_PGTABLE_LEVELS > 2 */ 620#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */ 621 622/* 623 * We require that the PTE can be read atomically. 624 */ 625#ifndef ptep_get_lockless 626static inline pte_t ptep_get_lockless(pte_t *ptep) 627{ 628 return ptep_get(ptep); 629} 630#endif 631 632#ifndef pmdp_get_lockless 633static inline pmd_t pmdp_get_lockless(pmd_t *pmdp) 634{ 635 return pmdp_get(pmdp); 636} 637static inline void pmdp_get_lockless_sync(void) 638{ 639} 640#endif 641 642#ifdef CONFIG_TRANSPARENT_HUGEPAGE 643#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 644static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 645 unsigned long address, 646 pmd_t *pmdp) 647{ 648 pmd_t pmd = *pmdp; 649 650 pmd_clear(pmdp); 651 page_table_check_pmd_clear(mm, pmd); 652 653 return pmd; 654} 655#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ 656#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR 657static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, 658 unsigned long address, 659 pud_t *pudp) 660{ 661 pud_t pud = *pudp; 662 663 pud_clear(pudp); 664 page_table_check_pud_clear(mm, pud); 665 666 return pud; 667} 668#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ 669#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 670 671#ifdef CONFIG_TRANSPARENT_HUGEPAGE 672#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL 673static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, 674 unsigned long address, pmd_t *pmdp, 675 int full) 676{ 677 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 678} 679#endif 680 681#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL 682static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma, 683 unsigned long address, pud_t *pudp, 684 int full) 685{ 686 return pudp_huge_get_and_clear(vma->vm_mm, address, pudp); 687} 688#endif 689#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 690 691#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 692static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, 693 unsigned long address, pte_t *ptep, 694 int full) 695{ 696 return ptep_get_and_clear(mm, address, ptep); 697} 698#endif 699 700#ifndef get_and_clear_full_ptes 701/** 702 * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of 703 * the same folio, collecting dirty/accessed bits. 704 * @mm: Address space the pages are mapped into. 705 * @addr: Address the first page is mapped at. 706 * @ptep: Page table pointer for the first entry. 707 * @nr: Number of entries to clear. 708 * @full: Whether we are clearing a full mm. 709 * 710 * May be overridden by the architecture; otherwise, implemented as a simple 711 * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the 712 * returned PTE. 713 * 714 * Note that PTE bits in the PTE range besides the PFN can differ. For example, 715 * some PTEs might be write-protected. 716 * 717 * Context: The caller holds the page table lock. The PTEs map consecutive 718 * pages that belong to the same folio. The PTEs are all in the same PMD. 719 */ 720static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, 721 unsigned long addr, pte_t *ptep, unsigned int nr, int full) 722{ 723 pte_t pte, tmp_pte; 724 725 pte = ptep_get_and_clear_full(mm, addr, ptep, full); 726 while (--nr) { 727 ptep++; 728 addr += PAGE_SIZE; 729 tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full); 730 if (pte_dirty(tmp_pte)) 731 pte = pte_mkdirty(pte); 732 if (pte_young(tmp_pte)) 733 pte = pte_mkyoung(pte); 734 } 735 return pte; 736} 737#endif 738 739/** 740 * get_and_clear_ptes - Clear present PTEs that map consecutive pages of 741 * the same folio, collecting dirty/accessed bits. 742 * @mm: Address space the pages are mapped into. 743 * @addr: Address the first page is mapped at. 744 * @ptep: Page table pointer for the first entry. 745 * @nr: Number of entries to clear. 746 * 747 * Use this instead of get_and_clear_full_ptes() if it is known that we don't 748 * need to clear the full mm, which is mostly the case. 749 * 750 * Note that PTE bits in the PTE range besides the PFN can differ. For example, 751 * some PTEs might be write-protected. 752 * 753 * Context: The caller holds the page table lock. The PTEs map consecutive 754 * pages that belong to the same folio. The PTEs are all in the same PMD. 755 */ 756static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr, 757 pte_t *ptep, unsigned int nr) 758{ 759 return get_and_clear_full_ptes(mm, addr, ptep, nr, 0); 760} 761 762#ifndef clear_full_ptes 763/** 764 * clear_full_ptes - Clear present PTEs that map consecutive pages of the same 765 * folio. 766 * @mm: Address space the pages are mapped into. 767 * @addr: Address the first page is mapped at. 768 * @ptep: Page table pointer for the first entry. 769 * @nr: Number of entries to clear. 770 * @full: Whether we are clearing a full mm. 771 * 772 * May be overridden by the architecture; otherwise, implemented as a simple 773 * loop over ptep_get_and_clear_full(). 774 * 775 * Note that PTE bits in the PTE range besides the PFN can differ. For example, 776 * some PTEs might be write-protected. 777 * 778 * Context: The caller holds the page table lock. The PTEs map consecutive 779 * pages that belong to the same folio. The PTEs are all in the same PMD. 780 */ 781static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, 782 pte_t *ptep, unsigned int nr, int full) 783{ 784 for (;;) { 785 ptep_get_and_clear_full(mm, addr, ptep, full); 786 if (--nr == 0) 787 break; 788 ptep++; 789 addr += PAGE_SIZE; 790 } 791} 792#endif 793 794/** 795 * clear_ptes - Clear present PTEs that map consecutive pages of the same folio. 796 * @mm: Address space the pages are mapped into. 797 * @addr: Address the first page is mapped at. 798 * @ptep: Page table pointer for the first entry. 799 * @nr: Number of entries to clear. 800 * 801 * Use this instead of clear_full_ptes() if it is known that we don't need to 802 * clear the full mm, which is mostly the case. 803 * 804 * Note that PTE bits in the PTE range besides the PFN can differ. For example, 805 * some PTEs might be write-protected. 806 * 807 * Context: The caller holds the page table lock. The PTEs map consecutive 808 * pages that belong to the same folio. The PTEs are all in the same PMD. 809 */ 810static inline void clear_ptes(struct mm_struct *mm, unsigned long addr, 811 pte_t *ptep, unsigned int nr) 812{ 813 clear_full_ptes(mm, addr, ptep, nr, 0); 814} 815 816/* 817 * If two threads concurrently fault at the same page, the thread that 818 * won the race updates the PTE and its local TLB/Cache. The other thread 819 * gives up, simply does nothing, and continues; on architectures where 820 * software can update TLB, local TLB can be updated here to avoid next page 821 * fault. This function updates TLB only, do nothing with cache or others. 822 * It is the difference with function update_mmu_cache. 823 */ 824#ifndef update_mmu_tlb_range 825static inline void update_mmu_tlb_range(struct vm_area_struct *vma, 826 unsigned long address, pte_t *ptep, unsigned int nr) 827{ 828} 829#endif 830 831static inline void update_mmu_tlb(struct vm_area_struct *vma, 832 unsigned long address, pte_t *ptep) 833{ 834 update_mmu_tlb_range(vma, address, ptep, 1); 835} 836 837/* 838 * Some architectures may be able to avoid expensive synchronization 839 * primitives when modifications are made to PTE's which are already 840 * not present, or in the process of an address space destruction. 841 */ 842#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL 843static inline void pte_clear_not_present_full(struct mm_struct *mm, 844 unsigned long address, 845 pte_t *ptep, 846 int full) 847{ 848 pte_clear(mm, address, ptep); 849} 850#endif 851 852#ifndef clear_not_present_full_ptes 853/** 854 * clear_not_present_full_ptes - Clear multiple not present PTEs which are 855 * consecutive in the pgtable. 856 * @mm: Address space the ptes represent. 857 * @addr: Address of the first pte. 858 * @ptep: Page table pointer for the first entry. 859 * @nr: Number of entries to clear. 860 * @full: Whether we are clearing a full mm. 861 * 862 * May be overridden by the architecture; otherwise, implemented as a simple 863 * loop over pte_clear_not_present_full(). 864 * 865 * Context: The caller holds the page table lock. The PTEs are all not present. 866 * The PTEs are all in the same PMD. 867 */ 868static inline void clear_not_present_full_ptes(struct mm_struct *mm, 869 unsigned long addr, pte_t *ptep, unsigned int nr, int full) 870{ 871 for (;;) { 872 pte_clear_not_present_full(mm, addr, ptep, full); 873 if (--nr == 0) 874 break; 875 ptep++; 876 addr += PAGE_SIZE; 877 } 878} 879#endif 880 881#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH 882extern pte_t ptep_clear_flush(struct vm_area_struct *vma, 883 unsigned long address, 884 pte_t *ptep); 885#endif 886 887#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH 888extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, 889 unsigned long address, 890 pmd_t *pmdp); 891extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, 892 unsigned long address, 893 pud_t *pudp); 894#endif 895 896#ifndef pte_mkwrite 897static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma) 898{ 899 return pte_mkwrite_novma(pte); 900} 901#endif 902 903#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite) 904static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) 905{ 906 return pmd_mkwrite_novma(pmd); 907} 908#endif 909 910#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT 911struct mm_struct; 912static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) 913{ 914 pte_t old_pte = ptep_get(ptep); 915 set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); 916} 917#endif 918 919#ifndef wrprotect_ptes 920/** 921 * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same 922 * folio. 923 * @mm: Address space the pages are mapped into. 924 * @addr: Address the first page is mapped at. 925 * @ptep: Page table pointer for the first entry. 926 * @nr: Number of entries to write-protect. 927 * 928 * May be overridden by the architecture; otherwise, implemented as a simple 929 * loop over ptep_set_wrprotect(). 930 * 931 * Note that PTE bits in the PTE range besides the PFN can differ. For example, 932 * some PTEs might be write-protected. 933 * 934 * Context: The caller holds the page table lock. The PTEs map consecutive 935 * pages that belong to the same folio. The PTEs are all in the same PMD. 936 */ 937static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr, 938 pte_t *ptep, unsigned int nr) 939{ 940 for (;;) { 941 ptep_set_wrprotect(mm, addr, ptep); 942 if (--nr == 0) 943 break; 944 ptep++; 945 addr += PAGE_SIZE; 946 } 947} 948#endif 949 950/* 951 * On some architectures hardware does not set page access bit when accessing 952 * memory page, it is responsibility of software setting this bit. It brings 953 * out extra page fault penalty to track page access bit. For optimization page 954 * access bit can be set during all page fault flow on these arches. 955 * To be differentiate with macro pte_mkyoung, this macro is used on platforms 956 * where software maintains page access bit. 957 */ 958#ifndef pte_sw_mkyoung 959static inline pte_t pte_sw_mkyoung(pte_t pte) 960{ 961 return pte; 962} 963#define pte_sw_mkyoung pte_sw_mkyoung 964#endif 965 966#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT 967#ifdef CONFIG_TRANSPARENT_HUGEPAGE 968static inline void pmdp_set_wrprotect(struct mm_struct *mm, 969 unsigned long address, pmd_t *pmdp) 970{ 971 pmd_t old_pmd = *pmdp; 972 set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); 973} 974#else 975static inline void pmdp_set_wrprotect(struct mm_struct *mm, 976 unsigned long address, pmd_t *pmdp) 977{ 978 BUILD_BUG(); 979} 980#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 981#endif 982#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT 983#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 984#ifdef CONFIG_TRANSPARENT_HUGEPAGE 985static inline void pudp_set_wrprotect(struct mm_struct *mm, 986 unsigned long address, pud_t *pudp) 987{ 988 pud_t old_pud = *pudp; 989 990 set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); 991} 992#else 993static inline void pudp_set_wrprotect(struct mm_struct *mm, 994 unsigned long address, pud_t *pudp) 995{ 996 BUILD_BUG(); 997} 998#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 999#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 1000#endif 1001 1002#ifndef pmdp_collapse_flush 1003#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1004extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, 1005 unsigned long address, pmd_t *pmdp); 1006#else 1007static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, 1008 unsigned long address, 1009 pmd_t *pmdp) 1010{ 1011 BUILD_BUG(); 1012 return *pmdp; 1013} 1014#define pmdp_collapse_flush pmdp_collapse_flush 1015#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1016#endif 1017 1018#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT 1019extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 1020 pgtable_t pgtable); 1021#endif 1022 1023#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW 1024extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 1025#endif 1026 1027#ifndef arch_needs_pgtable_deposit 1028#define arch_needs_pgtable_deposit() (false) 1029#endif 1030 1031#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1032/* 1033 * This is an implementation of pmdp_establish() that is only suitable for an 1034 * architecture that doesn't have hardware dirty/accessed bits. In this case we 1035 * can't race with CPU which sets these bits and non-atomic approach is fine. 1036 */ 1037static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, 1038 unsigned long address, pmd_t *pmdp, pmd_t pmd) 1039{ 1040 pmd_t old_pmd = *pmdp; 1041 set_pmd_at(vma->vm_mm, address, pmdp, pmd); 1042 return old_pmd; 1043} 1044#endif 1045 1046#ifndef __HAVE_ARCH_PMDP_INVALIDATE 1047extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 1048 pmd_t *pmdp); 1049#endif 1050 1051#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD 1052 1053/* 1054 * pmdp_invalidate_ad() invalidates the PMD while changing a transparent 1055 * hugepage mapping in the page tables. This function is similar to 1056 * pmdp_invalidate(), but should only be used if the access and dirty bits would 1057 * not be cleared by the software in the new PMD value. The function ensures 1058 * that hardware changes of the access and dirty bits updates would not be lost. 1059 * 1060 * Doing so can allow in certain architectures to avoid a TLB flush in most 1061 * cases. Yet, another TLB flush might be necessary later if the PMD update 1062 * itself requires such flush (e.g., if protection was set to be stricter). Yet, 1063 * even when a TLB flush is needed because of the update, the caller may be able 1064 * to batch these TLB flushing operations, so fewer TLB flush operations are 1065 * needed. 1066 */ 1067extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, 1068 unsigned long address, pmd_t *pmdp); 1069#endif 1070 1071#ifndef __HAVE_ARCH_PTE_SAME 1072static inline int pte_same(pte_t pte_a, pte_t pte_b) 1073{ 1074 return pte_val(pte_a) == pte_val(pte_b); 1075} 1076#endif 1077 1078#ifndef __HAVE_ARCH_PTE_UNUSED 1079/* 1080 * Some architectures provide facilities to virtualization guests 1081 * so that they can flag allocated pages as unused. This allows the 1082 * host to transparently reclaim unused pages. This function returns 1083 * whether the pte's page is unused. 1084 */ 1085static inline int pte_unused(pte_t pte) 1086{ 1087 return 0; 1088} 1089#endif 1090 1091#ifndef pte_access_permitted 1092#define pte_access_permitted(pte, write) \ 1093 (pte_present(pte) && (!(write) || pte_write(pte))) 1094#endif 1095 1096#ifndef pmd_access_permitted 1097#define pmd_access_permitted(pmd, write) \ 1098 (pmd_present(pmd) && (!(write) || pmd_write(pmd))) 1099#endif 1100 1101#ifndef pud_access_permitted 1102#define pud_access_permitted(pud, write) \ 1103 (pud_present(pud) && (!(write) || pud_write(pud))) 1104#endif 1105 1106#ifndef p4d_access_permitted 1107#define p4d_access_permitted(p4d, write) \ 1108 (p4d_present(p4d) && (!(write) || p4d_write(p4d))) 1109#endif 1110 1111#ifndef pgd_access_permitted 1112#define pgd_access_permitted(pgd, write) \ 1113 (pgd_present(pgd) && (!(write) || pgd_write(pgd))) 1114#endif 1115 1116#ifndef __HAVE_ARCH_PMD_SAME 1117static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) 1118{ 1119 return pmd_val(pmd_a) == pmd_val(pmd_b); 1120} 1121#endif 1122 1123#ifndef pud_same 1124static inline int pud_same(pud_t pud_a, pud_t pud_b) 1125{ 1126 return pud_val(pud_a) == pud_val(pud_b); 1127} 1128#define pud_same pud_same 1129#endif 1130 1131#ifndef __HAVE_ARCH_P4D_SAME 1132static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b) 1133{ 1134 return p4d_val(p4d_a) == p4d_val(p4d_b); 1135} 1136#endif 1137 1138#ifndef __HAVE_ARCH_PGD_SAME 1139static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) 1140{ 1141 return pgd_val(pgd_a) == pgd_val(pgd_b); 1142} 1143#endif 1144 1145#ifndef __HAVE_ARCH_DO_SWAP_PAGE 1146static inline void arch_do_swap_page_nr(struct mm_struct *mm, 1147 struct vm_area_struct *vma, 1148 unsigned long addr, 1149 pte_t pte, pte_t oldpte, 1150 int nr) 1151{ 1152 1153} 1154#else 1155/* 1156 * Some architectures support metadata associated with a page. When a 1157 * page is being swapped out, this metadata must be saved so it can be 1158 * restored when the page is swapped back in. SPARC M7 and newer 1159 * processors support an ADI (Application Data Integrity) tag for the 1160 * page as metadata for the page. arch_do_swap_page() can restore this 1161 * metadata when a page is swapped back in. 1162 */ 1163static inline void arch_do_swap_page_nr(struct mm_struct *mm, 1164 struct vm_area_struct *vma, 1165 unsigned long addr, 1166 pte_t pte, pte_t oldpte, 1167 int nr) 1168{ 1169 for (int i = 0; i < nr; i++) { 1170 arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE, 1171 pte_advance_pfn(pte, i), 1172 pte_advance_pfn(oldpte, i)); 1173 } 1174} 1175#endif 1176 1177#ifndef __HAVE_ARCH_UNMAP_ONE 1178/* 1179 * Some architectures support metadata associated with a page. When a 1180 * page is being swapped out, this metadata must be saved so it can be 1181 * restored when the page is swapped back in. SPARC M7 and newer 1182 * processors support an ADI (Application Data Integrity) tag for the 1183 * page as metadata for the page. arch_unmap_one() can save this 1184 * metadata on a swap-out of a page. 1185 */ 1186static inline int arch_unmap_one(struct mm_struct *mm, 1187 struct vm_area_struct *vma, 1188 unsigned long addr, 1189 pte_t orig_pte) 1190{ 1191 return 0; 1192} 1193#endif 1194 1195/* 1196 * Allow architectures to preserve additional metadata associated with 1197 * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function 1198 * prototypes must be defined in the arch-specific asm/pgtable.h file. 1199 */ 1200#ifndef __HAVE_ARCH_PREPARE_TO_SWAP 1201static inline int arch_prepare_to_swap(struct folio *folio) 1202{ 1203 return 0; 1204} 1205#endif 1206 1207#ifndef __HAVE_ARCH_SWAP_INVALIDATE 1208static inline void arch_swap_invalidate_page(int type, pgoff_t offset) 1209{ 1210} 1211 1212static inline void arch_swap_invalidate_area(int type) 1213{ 1214} 1215#endif 1216 1217#ifndef __HAVE_ARCH_SWAP_RESTORE 1218static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) 1219{ 1220} 1221#endif 1222 1223#ifndef __HAVE_ARCH_MOVE_PTE 1224#define move_pte(pte, old_addr, new_addr) (pte) 1225#endif 1226 1227#ifndef pte_accessible 1228# define pte_accessible(mm, pte) ((void)(pte), 1) 1229#endif 1230 1231#ifndef flush_tlb_fix_spurious_fault 1232#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address) 1233#endif 1234 1235#ifndef flush_tlb_fix_spurious_fault_pmd 1236#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) do { } while (0) 1237#endif 1238 1239/* 1240 * When walking page tables, get the address of the next boundary, 1241 * or the end address of the range if that comes earlier. Although no 1242 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. 1243 */ 1244 1245#define pgd_addr_end(addr, end) \ 1246({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ 1247 (__boundary - 1 < (end) - 1)? __boundary: (end); \ 1248}) 1249 1250#ifndef p4d_addr_end 1251#define p4d_addr_end(addr, end) \ 1252({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ 1253 (__boundary - 1 < (end) - 1)? __boundary: (end); \ 1254}) 1255#endif 1256 1257#ifndef pud_addr_end 1258#define pud_addr_end(addr, end) \ 1259({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ 1260 (__boundary - 1 < (end) - 1)? __boundary: (end); \ 1261}) 1262#endif 1263 1264#ifndef pmd_addr_end 1265#define pmd_addr_end(addr, end) \ 1266({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ 1267 (__boundary - 1 < (end) - 1)? __boundary: (end); \ 1268}) 1269#endif 1270 1271/* 1272 * When walking page tables, we usually want to skip any p?d_none entries; 1273 * and any p?d_bad entries - reporting the error before resetting to none. 1274 * Do the tests inline, but report and clear the bad entry in mm/memory.c. 1275 */ 1276void pgd_clear_bad(pgd_t *); 1277 1278#ifndef __PAGETABLE_P4D_FOLDED 1279void p4d_clear_bad(p4d_t *); 1280#else 1281#define p4d_clear_bad(p4d) do { } while (0) 1282#endif 1283 1284#ifndef __PAGETABLE_PUD_FOLDED 1285void pud_clear_bad(pud_t *); 1286#else 1287#define pud_clear_bad(p4d) do { } while (0) 1288#endif 1289 1290void pmd_clear_bad(pmd_t *); 1291 1292static inline int pgd_none_or_clear_bad(pgd_t *pgd) 1293{ 1294 if (pgd_none(*pgd)) 1295 return 1; 1296 if (unlikely(pgd_bad(*pgd))) { 1297 pgd_clear_bad(pgd); 1298 return 1; 1299 } 1300 return 0; 1301} 1302 1303static inline int p4d_none_or_clear_bad(p4d_t *p4d) 1304{ 1305 if (p4d_none(*p4d)) 1306 return 1; 1307 if (unlikely(p4d_bad(*p4d))) { 1308 p4d_clear_bad(p4d); 1309 return 1; 1310 } 1311 return 0; 1312} 1313 1314static inline int pud_none_or_clear_bad(pud_t *pud) 1315{ 1316 if (pud_none(*pud)) 1317 return 1; 1318 if (unlikely(pud_bad(*pud))) { 1319 pud_clear_bad(pud); 1320 return 1; 1321 } 1322 return 0; 1323} 1324 1325static inline int pmd_none_or_clear_bad(pmd_t *pmd) 1326{ 1327 if (pmd_none(*pmd)) 1328 return 1; 1329 if (unlikely(pmd_bad(*pmd))) { 1330 pmd_clear_bad(pmd); 1331 return 1; 1332 } 1333 return 0; 1334} 1335 1336static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma, 1337 unsigned long addr, 1338 pte_t *ptep) 1339{ 1340 /* 1341 * Get the current pte state, but zero it out to make it 1342 * non-present, preventing the hardware from asynchronously 1343 * updating it. 1344 */ 1345 return ptep_get_and_clear(vma->vm_mm, addr, ptep); 1346} 1347 1348static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma, 1349 unsigned long addr, 1350 pte_t *ptep, pte_t pte) 1351{ 1352 /* 1353 * The pte is non-present, so there's no hardware state to 1354 * preserve. 1355 */ 1356 set_pte_at(vma->vm_mm, addr, ptep, pte); 1357} 1358 1359#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION 1360/* 1361 * Start a pte protection read-modify-write transaction, which 1362 * protects against asynchronous hardware modifications to the pte. 1363 * The intention is not to prevent the hardware from making pte 1364 * updates, but to prevent any updates it may make from being lost. 1365 * 1366 * This does not protect against other software modifications of the 1367 * pte; the appropriate pte lock must be held over the transaction. 1368 * 1369 * Note that this interface is intended to be batchable, meaning that 1370 * ptep_modify_prot_commit may not actually update the pte, but merely 1371 * queue the update to be done at some later time. The update must be 1372 * actually committed before the pte lock is released, however. 1373 */ 1374static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, 1375 unsigned long addr, 1376 pte_t *ptep) 1377{ 1378 return __ptep_modify_prot_start(vma, addr, ptep); 1379} 1380 1381/* 1382 * Commit an update to a pte, leaving any hardware-controlled bits in 1383 * the PTE unmodified. The pte returned from ptep_modify_prot_start() may 1384 * additionally have young and/or dirty bits set where previously they were not, 1385 * so the updated pte may have these additional changes. 1386 */ 1387static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, 1388 unsigned long addr, 1389 pte_t *ptep, pte_t old_pte, pte_t pte) 1390{ 1391 __ptep_modify_prot_commit(vma, addr, ptep, pte); 1392} 1393#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ 1394 1395/** 1396 * modify_prot_start_ptes - Start a pte protection read-modify-write transaction 1397 * over a batch of ptes, which protects against asynchronous hardware 1398 * modifications to the ptes. The intention is not to prevent the hardware from 1399 * making pte updates, but to prevent any updates it may make from being lost. 1400 * Please see the comment above ptep_modify_prot_start() for full description. 1401 * 1402 * @vma: The virtual memory area the pages are mapped into. 1403 * @addr: Address the first page is mapped at. 1404 * @ptep: Page table pointer for the first entry. 1405 * @nr: Number of entries. 1406 * 1407 * May be overridden by the architecture; otherwise, implemented as a simple 1408 * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte 1409 * in the batch. 1410 * 1411 * Note that PTE bits in the PTE batch besides the PFN can differ. 1412 * 1413 * Context: The caller holds the page table lock. The PTEs map consecutive 1414 * pages that belong to the same folio. All other PTE bits must be identical for 1415 * all PTEs in the batch except for young and dirty bits. The PTEs are all in 1416 * the same PMD. 1417 */ 1418#ifndef modify_prot_start_ptes 1419static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma, 1420 unsigned long addr, pte_t *ptep, unsigned int nr) 1421{ 1422 pte_t pte, tmp_pte; 1423 1424 pte = ptep_modify_prot_start(vma, addr, ptep); 1425 while (--nr) { 1426 ptep++; 1427 addr += PAGE_SIZE; 1428 tmp_pte = ptep_modify_prot_start(vma, addr, ptep); 1429 if (pte_dirty(tmp_pte)) 1430 pte = pte_mkdirty(pte); 1431 if (pte_young(tmp_pte)) 1432 pte = pte_mkyoung(pte); 1433 } 1434 return pte; 1435} 1436#endif 1437 1438/** 1439 * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any 1440 * hardware-controlled bits in the PTE unmodified. 1441 * 1442 * @vma: The virtual memory area the pages are mapped into. 1443 * @addr: Address the first page is mapped at. 1444 * @ptep: Page table pointer for the first entry. 1445 * @old_pte: Old page table entry (for the first entry) which is now cleared. 1446 * @pte: New page table entry to be set. 1447 * @nr: Number of entries. 1448 * 1449 * May be overridden by the architecture; otherwise, implemented as a simple 1450 * loop over ptep_modify_prot_commit(). 1451 * 1452 * Context: The caller holds the page table lock. The PTEs are all in the same 1453 * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by 1454 * ptep_modify_prot_start() may additionally have young and/or dirty bits set 1455 * where previously they were not, so the updated ptes may have these 1456 * additional changes. 1457 */ 1458#ifndef modify_prot_commit_ptes 1459static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr, 1460 pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr) 1461{ 1462 int i; 1463 1464 for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) { 1465 ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte); 1466 1467 /* Advance PFN only, set same prot */ 1468 old_pte = pte_next_pfn(old_pte); 1469 pte = pte_next_pfn(pte); 1470 } 1471} 1472#endif 1473 1474/* 1475 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values 1476 * and let generic vmalloc, ioremap and page table update code know when 1477 * arch_sync_kernel_mappings() needs to be called. 1478 */ 1479#ifndef ARCH_PAGE_TABLE_SYNC_MASK 1480#define ARCH_PAGE_TABLE_SYNC_MASK 0 1481#endif 1482 1483/* 1484 * There is no default implementation for arch_sync_kernel_mappings(). It is 1485 * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK 1486 * is 0. 1487 */ 1488void arch_sync_kernel_mappings(unsigned long start, unsigned long end); 1489 1490#endif /* CONFIG_MMU */ 1491 1492/* 1493 * No-op macros that just return the current protection value. Defined here 1494 * because these macros can be used even if CONFIG_MMU is not defined. 1495 */ 1496 1497#ifndef pgprot_nx 1498#define pgprot_nx(prot) (prot) 1499#endif 1500 1501#ifndef pgprot_noncached 1502#define pgprot_noncached(prot) (prot) 1503#endif 1504 1505#ifndef pgprot_writecombine 1506#define pgprot_writecombine pgprot_noncached 1507#endif 1508 1509#ifndef pgprot_writethrough 1510#define pgprot_writethrough pgprot_noncached 1511#endif 1512 1513#ifndef pgprot_device 1514#define pgprot_device pgprot_noncached 1515#endif 1516 1517#ifndef pgprot_mhp 1518#define pgprot_mhp(prot) (prot) 1519#endif 1520 1521#ifdef CONFIG_MMU 1522#ifndef pgprot_modify 1523#define pgprot_modify pgprot_modify 1524static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) 1525{ 1526 if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) 1527 newprot = pgprot_noncached(newprot); 1528 if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) 1529 newprot = pgprot_writecombine(newprot); 1530 if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) 1531 newprot = pgprot_device(newprot); 1532 return newprot; 1533} 1534#endif 1535#endif /* CONFIG_MMU */ 1536 1537#ifndef pgprot_encrypted 1538#define pgprot_encrypted(prot) (prot) 1539#endif 1540 1541#ifndef pgprot_decrypted 1542#define pgprot_decrypted(prot) (prot) 1543#endif 1544 1545/* 1546 * A facility to provide batching of the reload of page tables and 1547 * other process state with the actual context switch code for 1548 * paravirtualized guests. By convention, only one of the batched 1549 * update (lazy) modes (CPU, MMU) should be active at any given time, 1550 * entry should never be nested, and entry and exits should always be 1551 * paired. This is for sanity of maintaining and reasoning about the 1552 * kernel code. In this case, the exit (end of the context switch) is 1553 * in architecture-specific code, and so doesn't need a generic 1554 * definition. 1555 */ 1556#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH 1557#define arch_start_context_switch(prev) do {} while (0) 1558#endif 1559 1560/* 1561 * Some platforms can customize the PTE soft-dirty bit making it unavailable 1562 * even if the architecture provides the resource. 1563 * Adding this API allows architectures to add their own checks for the 1564 * devices on which the kernel is running. 1565 * Note: When overriding it, please make sure the CONFIG_MEM_SOFT_DIRTY 1566 * is part of this macro. 1567 */ 1568#ifndef pgtable_supports_soft_dirty 1569#define pgtable_supports_soft_dirty() IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) 1570#endif 1571 1572#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY 1573#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION 1574static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) 1575{ 1576 return pmd; 1577} 1578 1579static inline int pmd_swp_soft_dirty(pmd_t pmd) 1580{ 1581 return 0; 1582} 1583 1584static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) 1585{ 1586 return pmd; 1587} 1588#endif 1589#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */ 1590static inline int pte_soft_dirty(pte_t pte) 1591{ 1592 return 0; 1593} 1594 1595static inline int pmd_soft_dirty(pmd_t pmd) 1596{ 1597 return 0; 1598} 1599 1600static inline pte_t pte_mksoft_dirty(pte_t pte) 1601{ 1602 return pte; 1603} 1604 1605static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) 1606{ 1607 return pmd; 1608} 1609 1610static inline pte_t pte_clear_soft_dirty(pte_t pte) 1611{ 1612 return pte; 1613} 1614 1615static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) 1616{ 1617 return pmd; 1618} 1619 1620static inline pte_t pte_swp_mksoft_dirty(pte_t pte) 1621{ 1622 return pte; 1623} 1624 1625static inline int pte_swp_soft_dirty(pte_t pte) 1626{ 1627 return 0; 1628} 1629 1630static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) 1631{ 1632 return pte; 1633} 1634 1635static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) 1636{ 1637 return pmd; 1638} 1639 1640static inline int pmd_swp_soft_dirty(pmd_t pmd) 1641{ 1642 return 0; 1643} 1644 1645static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) 1646{ 1647 return pmd; 1648} 1649#endif 1650 1651#ifndef __HAVE_PFNMAP_TRACKING 1652/* 1653 * Interfaces that can be used by architecture code to keep track of 1654 * memory type of pfn mappings specified by the remap_pfn_range, 1655 * vmf_insert_pfn. 1656 */ 1657 1658static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, 1659 pgprot_t *prot) 1660{ 1661 return 0; 1662} 1663 1664static inline int pfnmap_track(unsigned long pfn, unsigned long size, 1665 pgprot_t *prot) 1666{ 1667 return 0; 1668} 1669 1670static inline void pfnmap_untrack(unsigned long pfn, unsigned long size) 1671{ 1672} 1673#else 1674/** 1675 * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range 1676 * @pfn: the start of the pfn range 1677 * @size: the size of the pfn range in bytes 1678 * @prot: the pgprot to modify 1679 * 1680 * Lookup the cachemode for the pfn range starting at @pfn with the size 1681 * @size and store it in @prot, leaving other data in @prot unchanged. 1682 * 1683 * This allows for a hardware implementation to have fine-grained control of 1684 * memory cache behavior at page level granularity. Without a hardware 1685 * implementation, this function does nothing. 1686 * 1687 * Currently there is only one implementation for this - x86 Page Attribute 1688 * Table (PAT). See Documentation/arch/x86/pat.rst for more details. 1689 * 1690 * This function can fail if the pfn range spans pfns that require differing 1691 * cachemodes. If the pfn range was previously verified to have a single 1692 * cachemode, it is sufficient to query only a single pfn. The assumption is 1693 * that this is the case for drivers using the vmf_insert_pfn*() interface. 1694 * 1695 * Returns 0 on success and -EINVAL on error. 1696 */ 1697int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, 1698 pgprot_t *prot); 1699 1700/** 1701 * pfnmap_track - track a pfn range 1702 * @pfn: the start of the pfn range 1703 * @size: the size of the pfn range in bytes 1704 * @prot: the pgprot to track 1705 * 1706 * Requested the pfn range to be 'tracked' by a hardware implementation and 1707 * setup the cachemode in @prot similar to pfnmap_setup_cachemode(). 1708 * 1709 * This allows for fine-grained control of memory cache behaviour at page 1710 * level granularity. Tracking memory this way is persisted across VMA splits 1711 * (VMA merging does not apply for VM_PFNMAP). 1712 * 1713 * Currently, there is only one implementation for this - x86 Page Attribute 1714 * Table (PAT). See Documentation/arch/x86/pat.rst for more details. 1715 * 1716 * Returns 0 on success and -EINVAL on error. 1717 */ 1718int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot); 1719 1720/** 1721 * pfnmap_untrack - untrack a pfn range 1722 * @pfn: the start of the pfn range 1723 * @size: the size of the pfn range in bytes 1724 * 1725 * Untrack a pfn range previously tracked through pfnmap_track(). 1726 */ 1727void pfnmap_untrack(unsigned long pfn, unsigned long size); 1728#endif 1729 1730/** 1731 * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn 1732 * @pfn: the pfn 1733 * @prot: the pgprot to modify 1734 * 1735 * Lookup the cachemode for @pfn and store it in @prot, leaving other 1736 * data in @prot unchanged. 1737 * 1738 * See pfnmap_setup_cachemode() for details. 1739 */ 1740static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot) 1741{ 1742 pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot); 1743} 1744 1745#ifdef CONFIG_MMU 1746#ifdef __HAVE_COLOR_ZERO_PAGE 1747static inline int is_zero_pfn(unsigned long pfn) 1748{ 1749 extern unsigned long zero_pfn; 1750 unsigned long offset_from_zero_pfn = pfn - zero_pfn; 1751 return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); 1752} 1753 1754#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) 1755 1756#else 1757static inline int is_zero_pfn(unsigned long pfn) 1758{ 1759 extern unsigned long zero_pfn; 1760 return pfn == zero_pfn; 1761} 1762 1763static inline unsigned long my_zero_pfn(unsigned long addr) 1764{ 1765 extern unsigned long zero_pfn; 1766 return zero_pfn; 1767} 1768#endif 1769#else 1770static inline int is_zero_pfn(unsigned long pfn) 1771{ 1772 return 0; 1773} 1774 1775static inline unsigned long my_zero_pfn(unsigned long addr) 1776{ 1777 return 0; 1778} 1779#endif /* CONFIG_MMU */ 1780 1781#ifdef CONFIG_MMU 1782 1783#ifndef CONFIG_TRANSPARENT_HUGEPAGE 1784static inline int pmd_trans_huge(pmd_t pmd) 1785{ 1786 return 0; 1787} 1788#ifndef pmd_write 1789static inline int pmd_write(pmd_t pmd) 1790{ 1791 BUG(); 1792 return 0; 1793} 1794#endif /* pmd_write */ 1795#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1796 1797#ifndef pud_write 1798static inline int pud_write(pud_t pud) 1799{ 1800 BUG(); 1801 return 0; 1802} 1803#endif /* pud_write */ 1804 1805#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ 1806 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 1807static inline int pud_trans_huge(pud_t pud) 1808{ 1809 return 0; 1810} 1811#endif 1812 1813static inline int pud_trans_unstable(pud_t *pud) 1814{ 1815#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 1816 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 1817 pud_t pudval = READ_ONCE(*pud); 1818 1819 if (pud_none(pudval) || pud_trans_huge(pudval)) 1820 return 1; 1821 if (unlikely(pud_bad(pudval))) { 1822 pud_clear_bad(pud); 1823 return 1; 1824 } 1825#endif 1826 return 0; 1827} 1828 1829#ifndef CONFIG_NUMA_BALANCING 1830/* 1831 * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is 1832 * perfectly valid to indicate "no" in that case, which is why our default 1833 * implementation defaults to "always no". 1834 * 1835 * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE 1836 * page protection due to NUMA hinting. NUMA hinting faults only apply in 1837 * accessible VMAs. 1838 * 1839 * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault, 1840 * looking at the VMA accessibility is sufficient. 1841 */ 1842static inline int pte_protnone(pte_t pte) 1843{ 1844 return 0; 1845} 1846 1847static inline int pmd_protnone(pmd_t pmd) 1848{ 1849 return 0; 1850} 1851#endif /* CONFIG_NUMA_BALANCING */ 1852 1853#endif /* CONFIG_MMU */ 1854 1855#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP 1856 1857#ifndef __PAGETABLE_P4D_FOLDED 1858int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); 1859void p4d_clear_huge(p4d_t *p4d); 1860#else 1861static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) 1862{ 1863 return 0; 1864} 1865static inline void p4d_clear_huge(p4d_t *p4d) { } 1866#endif /* !__PAGETABLE_P4D_FOLDED */ 1867 1868int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); 1869int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); 1870int pud_clear_huge(pud_t *pud); 1871int pmd_clear_huge(pmd_t *pmd); 1872int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); 1873int pud_free_pmd_page(pud_t *pud, unsigned long addr); 1874int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); 1875#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ 1876static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) 1877{ 1878 return 0; 1879} 1880static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) 1881{ 1882 return 0; 1883} 1884static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) 1885{ 1886 return 0; 1887} 1888static inline void p4d_clear_huge(p4d_t *p4d) { } 1889static inline int pud_clear_huge(pud_t *pud) 1890{ 1891 return 0; 1892} 1893static inline int pmd_clear_huge(pmd_t *pmd) 1894{ 1895 return 0; 1896} 1897static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) 1898{ 1899 return 0; 1900} 1901static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) 1902{ 1903 return 0; 1904} 1905static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) 1906{ 1907 return 0; 1908} 1909#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 1910 1911#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE 1912#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1913/* 1914 * ARCHes with special requirements for evicting THP backing TLB entries can 1915 * implement this. Otherwise also, it can help optimize normal TLB flush in 1916 * THP regime. Stock flush_tlb_range() typically has optimization to nuke the 1917 * entire TLB if flush span is greater than a threshold, which will 1918 * likely be true for a single huge page. Thus a single THP flush will 1919 * invalidate the entire TLB which is not desirable. 1920 * e.g. see arch/arc: flush_pmd_tlb_range 1921 */ 1922#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) 1923#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) 1924#else 1925#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() 1926#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() 1927#endif 1928#endif 1929 1930struct file; 1931int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, 1932 unsigned long size, pgprot_t *vma_prot); 1933 1934#ifndef CONFIG_X86_ESPFIX64 1935static inline void init_espfix_bsp(void) { } 1936#endif 1937 1938extern void __init pgtable_cache_init(void); 1939 1940#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED 1941static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) 1942{ 1943 return true; 1944} 1945 1946static inline bool arch_has_pfn_modify_check(void) 1947{ 1948 return false; 1949} 1950#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ 1951 1952/* 1953 * Architecture PAGE_KERNEL_* fallbacks 1954 * 1955 * Some architectures don't define certain PAGE_KERNEL_* flags. This is either 1956 * because they really don't support them, or the port needs to be updated to 1957 * reflect the required functionality. Below are a set of relatively safe 1958 * fallbacks, as best effort, which we can count on in lieu of the architectures 1959 * not defining them on their own yet. 1960 */ 1961 1962#ifndef PAGE_KERNEL_RO 1963# define PAGE_KERNEL_RO PAGE_KERNEL 1964#endif 1965 1966#ifndef PAGE_KERNEL_EXEC 1967# define PAGE_KERNEL_EXEC PAGE_KERNEL 1968#endif 1969 1970/* 1971 * Page Table Modification bits for pgtbl_mod_mask. 1972 * 1973 * These are used by the p?d_alloc_track*() and p*d_populate_kernel() 1974 * functions in the generic vmalloc, ioremap and page table update code 1975 * to track at which page-table levels entries have been modified. 1976 * Based on that the code can better decide when page table changes need 1977 * to be synchronized to other page-tables in the system. 1978 */ 1979#define __PGTBL_PGD_MODIFIED 0 1980#define __PGTBL_P4D_MODIFIED 1 1981#define __PGTBL_PUD_MODIFIED 2 1982#define __PGTBL_PMD_MODIFIED 3 1983#define __PGTBL_PTE_MODIFIED 4 1984 1985#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED) 1986#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED) 1987#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED) 1988#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED) 1989#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED) 1990 1991/* Page-Table Modification Mask */ 1992typedef unsigned int pgtbl_mod_mask; 1993 1994enum pgtable_level { 1995 PGTABLE_LEVEL_PTE = 0, 1996 PGTABLE_LEVEL_PMD, 1997 PGTABLE_LEVEL_PUD, 1998 PGTABLE_LEVEL_P4D, 1999 PGTABLE_LEVEL_PGD, 2000}; 2001 2002static inline const char *pgtable_level_to_str(enum pgtable_level level) 2003{ 2004 switch (level) { 2005 case PGTABLE_LEVEL_PTE: 2006 return "pte"; 2007 case PGTABLE_LEVEL_PMD: 2008 return "pmd"; 2009 case PGTABLE_LEVEL_PUD: 2010 return "pud"; 2011 case PGTABLE_LEVEL_P4D: 2012 return "p4d"; 2013 case PGTABLE_LEVEL_PGD: 2014 return "pgd"; 2015 default: 2016 return "unknown"; 2017 } 2018} 2019 2020#endif /* !__ASSEMBLY__ */ 2021 2022#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT) 2023#ifdef CONFIG_PHYS_ADDR_T_64BIT 2024/* 2025 * ZSMALLOC needs to know the highest PFN on 32-bit architectures 2026 * with physical address space extension, but falls back to 2027 * BITS_PER_LONG otherwise. 2028 */ 2029#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition 2030#else 2031#define MAX_POSSIBLE_PHYSMEM_BITS 32 2032#endif 2033#endif 2034 2035#ifndef has_transparent_hugepage 2036#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE) 2037#endif 2038 2039#ifndef has_transparent_pud_hugepage 2040#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 2041#endif 2042/* 2043 * On some architectures it depends on the mm if the p4d/pud or pmd 2044 * layer of the page table hierarchy is folded or not. 2045 */ 2046#ifndef mm_p4d_folded 2047#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED) 2048#endif 2049 2050#ifndef mm_pud_folded 2051#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED) 2052#endif 2053 2054#ifndef mm_pmd_folded 2055#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED) 2056#endif 2057 2058#ifndef p4d_offset_lockless 2059#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address) 2060#endif 2061#ifndef pud_offset_lockless 2062#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address) 2063#endif 2064#ifndef pmd_offset_lockless 2065#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address) 2066#endif 2067 2068/* 2069 * pXd_leaf() is the API to check whether a pgtable entry is a huge page 2070 * mapping. It should work globally across all archs, without any 2071 * dependency on CONFIG_* options. For architectures that do not support 2072 * huge mappings on specific levels, below fallbacks will be used. 2073 * 2074 * A leaf pgtable entry should always imply the following: 2075 * 2076 * - It is a "present" entry. IOW, before using this API, please check it 2077 * with pXd_present() first. NOTE: it may not always mean the "present 2078 * bit" is set. For example, PROT_NONE entries are always "present". 2079 * 2080 * - It should _never_ be a swap entry of any type. Above "present" check 2081 * should have guarded this, but let's be crystal clear on this. 2082 * 2083 * - It should contain a huge PFN, which points to a huge page larger than 2084 * PAGE_SIZE of the platform. The PFN format isn't important here. 2085 * 2086 * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge() 2087 * or hugetlb mappings). 2088 */ 2089#ifndef pgd_leaf 2090#define pgd_leaf(x) false 2091#endif 2092#ifndef p4d_leaf 2093#define p4d_leaf(x) false 2094#endif 2095#ifndef pud_leaf 2096#define pud_leaf(x) false 2097#endif 2098#ifndef pmd_leaf 2099#define pmd_leaf(x) false 2100#endif 2101 2102#ifndef pgd_leaf_size 2103#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT) 2104#endif 2105#ifndef p4d_leaf_size 2106#define p4d_leaf_size(x) P4D_SIZE 2107#endif 2108#ifndef pud_leaf_size 2109#define pud_leaf_size(x) PUD_SIZE 2110#endif 2111#ifndef pmd_leaf_size 2112#define pmd_leaf_size(x) PMD_SIZE 2113#endif 2114#ifndef __pte_leaf_size 2115#ifndef pte_leaf_size 2116#define pte_leaf_size(x) PAGE_SIZE 2117#endif 2118#define __pte_leaf_size(x,y) pte_leaf_size(y) 2119#endif 2120 2121/* 2122 * We always define pmd_pfn for all archs as it's used in lots of generic 2123 * code. Now it happens too for pud_pfn (and can happen for larger 2124 * mappings too in the future; we're not there yet). Instead of defining 2125 * it for all archs (like pmd_pfn), provide a fallback. 2126 * 2127 * Note that returning 0 here means any arch that didn't define this can 2128 * get severely wrong when it hits a real pud leaf. It's arch's 2129 * responsibility to properly define it when a huge pud is possible. 2130 */ 2131#ifndef pud_pfn 2132#define pud_pfn(x) 0 2133#endif 2134 2135/* 2136 * Some architectures have MMUs that are configurable or selectable at boot 2137 * time. These lead to variable PTRS_PER_x. For statically allocated arrays it 2138 * helps to have a static maximum value. 2139 */ 2140 2141#ifndef MAX_PTRS_PER_PTE 2142#define MAX_PTRS_PER_PTE PTRS_PER_PTE 2143#endif 2144 2145#ifndef MAX_PTRS_PER_PMD 2146#define MAX_PTRS_PER_PMD PTRS_PER_PMD 2147#endif 2148 2149#ifndef MAX_PTRS_PER_PUD 2150#define MAX_PTRS_PER_PUD PTRS_PER_PUD 2151#endif 2152 2153#ifndef MAX_PTRS_PER_P4D 2154#define MAX_PTRS_PER_P4D PTRS_PER_P4D 2155#endif 2156 2157#ifndef pte_pgprot 2158#define pte_pgprot(x) ((pgprot_t) {0}) 2159#endif 2160 2161#ifndef pmd_pgprot 2162#define pmd_pgprot(x) ((pgprot_t) {0}) 2163#endif 2164 2165#ifndef pud_pgprot 2166#define pud_pgprot(x) ((pgprot_t) {0}) 2167#endif 2168 2169/* description of effects of mapping type and prot in current implementation. 2170 * this is due to the limited x86 page protection hardware. The expected 2171 * behavior is in parens: 2172 * 2173 * map_type prot 2174 * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC 2175 * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes 2176 * w: (no) no w: (no) no w: (yes) yes w: (no) no 2177 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes 2178 * 2179 * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes 2180 * w: (no) no w: (no) no w: (copy) copy w: (no) no 2181 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes 2182 * 2183 * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and 2184 * MAP_PRIVATE (with Enhanced PAN supported): 2185 * r: (no) no 2186 * w: (no) no 2187 * x: (yes) yes 2188 */ 2189#define DECLARE_VM_GET_PAGE_PROT \ 2190pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \ 2191{ \ 2192 return protection_map[vm_flags & \ 2193 (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \ 2194} \ 2195EXPORT_SYMBOL(vm_get_page_prot); 2196 2197#endif /* _LINUX_PGTABLE_H */