Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: remove devmap related functions and page table bits

Now that DAX and all other reference counts to ZONE_DEVICE pages are
managed normally there is no need for the special devmap PTE/PMD/PUD page
table bits. So drop all references to these, freeing up a software
defined page table bit on architectures supporting it.

Link: https://lkml.kernel.org/r/6389398c32cc9daa3dfcaa9f79c7972525d310ce.1750323463.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Acked-by: Will Deacon <will@kernel.org> # arm64
Acked-by: David Hildenbrand <david@redhat.com>
Suggested-by: Chunyan Zhang <zhang.lyra@gmail.com>
Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Balbir Singh <balbirs@nvidia.com>
Cc: Björn Töpel <bjorn@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Inki Dae <m.szyprowski@samsung.com>
Cc: John Groves <john@groves.net>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Alistair Popple and committed by Andrew Morton.
Commit IDs: d438d273 28dc88c3

+17 -319
-6
Documentation/mm/arch_pgtable_helpers.rst
··· 30 30 +---------------------------+--------------------------------------------------+ 31 31 | pte_protnone | Tests a PROT_NONE PTE | 32 32 +---------------------------+--------------------------------------------------+ 33 - | pte_devmap | Tests a ZONE_DEVICE mapped PTE | 34 - +---------------------------+--------------------------------------------------+ 35 33 | pte_soft_dirty | Tests a soft dirty PTE | 36 34 +---------------------------+--------------------------------------------------+ 37 35 | pte_swp_soft_dirty | Tests a soft dirty swapped PTE | ··· 102 104 +---------------------------+--------------------------------------------------+ 103 105 | pmd_protnone | Tests a PROT_NONE PMD | 104 106 +---------------------------+--------------------------------------------------+ 105 - | pmd_devmap | Tests a ZONE_DEVICE mapped PMD | 106 - +---------------------------+--------------------------------------------------+ 107 107 | pmd_soft_dirty | Tests a soft dirty PMD | 108 108 +---------------------------+--------------------------------------------------+ 109 109 | pmd_swp_soft_dirty | Tests a soft dirty swapped PMD | ··· 172 176 | pud_dirty | Tests a dirty PUD | 173 177 +---------------------------+--------------------------------------------------+ 174 178 | pud_write | Tests a writable PUD | 175 - +---------------------------+--------------------------------------------------+ 176 - | pud_devmap | Tests a ZONE_DEVICE mapped PUD | 177 179 +---------------------------+--------------------------------------------------+ 178 180 | pud_mkyoung | Creates a young PUD | 179 181 +---------------------------+--------------------------------------------------+
-1
arch/arm64/Kconfig
··· 44 44 select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT 45 45 select ARCH_HAS_PREEMPT_LAZY 46 46 select ARCH_HAS_PTDUMP 47 - select ARCH_HAS_PTE_DEVMAP 48 47 select ARCH_HAS_PTE_SPECIAL 49 48 select ARCH_HAS_HW_PTE_YOUNG 50 49 select ARCH_HAS_SETUP_DMA_OPS
-1
arch/arm64/include/asm/pgtable-prot.h
··· 17 17 #define PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 2) /* only for swp ptes */ 18 18 #define PTE_DIRTY (_AT(pteval_t, 1) << 55) 19 19 #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) 20 - #define PTE_DEVMAP (_AT(pteval_t, 1) << 57) 21 20 22 21 /* 23 22 * PTE_PRESENT_INVALID=1 & PTE_VALID=0 indicates that the pte's fields should be
-24
arch/arm64/include/asm/pgtable.h
··· 190 190 #define pte_user(pte) (!!(pte_val(pte) & PTE_USER)) 191 191 #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) 192 192 #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) 193 - #define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP)) 194 193 #define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \ 195 194 PTE_ATTRINDX(MT_NORMAL_TAGGED)) 196 195 ··· 369 370 static inline pmd_t pmd_mkcont(pmd_t pmd) 370 371 { 371 372 return __pmd(pmd_val(pmd) | PMD_SECT_CONT); 372 - } 373 - 374 - static inline pte_t pte_mkdevmap(pte_t pte) 375 - { 376 - return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); 377 373 } 378 374 379 375 #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP ··· 645 651 pmdval_t val = PMD_TYPE_SECT & ~PTE_VALID; 646 652 647 653 return __pmd((pmd_val(pmd) & ~mask) | val); 648 - } 649 - 650 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 651 - #define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd)) 652 - #endif 653 - static inline pmd_t pmd_mkdevmap(pmd_t pmd) 654 - { 655 - return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP))); 656 654 } 657 655 658 656 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP ··· 1287 1301 { 1288 1302 return __ptep_set_access_flags(vma, address, (pte_t *)pmdp, 1289 1303 pmd_pte(entry), dirty); 1290 - } 1291 - 1292 - static inline int pud_devmap(pud_t pud) 1293 - { 1294 - return 0; 1295 - } 1296 - 1297 - static inline int pgd_devmap(pgd_t pgd) 1298 - { 1299 - return 0; 1300 1304 } 1301 1305 #endif 1302 1306
-1
arch/loongarch/Kconfig
··· 25 25 select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS 26 26 select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 27 27 select ARCH_HAS_PREEMPT_LAZY 28 - select ARCH_HAS_PTE_DEVMAP 29 28 select ARCH_HAS_PTE_SPECIAL 30 29 select ARCH_HAS_SET_MEMORY 31 30 select ARCH_HAS_SET_DIRECT_MAP
+2 -4
arch/loongarch/include/asm/pgtable-bits.h
··· 22 22 #define _PAGE_PFN_SHIFT 12 23 23 #define _PAGE_SWP_EXCLUSIVE_SHIFT 23 24 24 #define _PAGE_PFN_END_SHIFT 48 25 - #define _PAGE_DEVMAP_SHIFT 59 26 25 #define _PAGE_PRESENT_INVALID_SHIFT 60 27 26 #define _PAGE_NO_READ_SHIFT 61 28 27 #define _PAGE_NO_EXEC_SHIFT 62 ··· 35 36 #define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT) 36 37 #define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT) 37 38 #define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT) 38 - #define _PAGE_DEVMAP (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT) 39 39 40 40 /* We borrow bit 23 to store the exclusive marker in swap PTEs. */ 41 41 #define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT) ··· 74 76 #define __READABLE (_PAGE_VALID) 75 77 #define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE) 76 78 77 - #define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV) 78 - #define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE) 79 + #define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV) 80 + #define _HPAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE) 79 81 80 82 #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \ 81 83 _PAGE_USER | _CACHE_CC)
-19
arch/loongarch/include/asm/pgtable.h
··· 409 409 static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; } 410 410 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ 411 411 412 - static inline int pte_devmap(pte_t pte) { return !!(pte_val(pte) & _PAGE_DEVMAP); } 413 - static inline pte_t pte_mkdevmap(pte_t pte) { pte_val(pte) |= _PAGE_DEVMAP; return pte; } 414 - 415 412 #define pte_accessible pte_accessible 416 413 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) 417 414 { ··· 537 540 return pmd; 538 541 } 539 542 540 - static inline int pmd_devmap(pmd_t pmd) 541 - { 542 - return !!(pmd_val(pmd) & _PAGE_DEVMAP); 543 - } 544 - 545 - static inline pmd_t pmd_mkdevmap(pmd_t pmd) 546 - { 547 - pmd_val(pmd) |= _PAGE_DEVMAP; 548 - return pmd; 549 - } 550 - 551 543 static inline struct page *pmd_page(pmd_t pmd) 552 544 { 553 545 if (pmd_trans_huge(pmd)) ··· 591 605 592 606 #define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0) 593 607 #define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0) 594 - 595 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 596 - #define pud_devmap(pud) (0) 597 - #define pgd_devmap(pgd) (0) 598 - #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 599 608 600 609 /* 601 610 * We provide our own get_unmapped area to cope with the virtual aliasing
-1
arch/powerpc/Kconfig
··· 149 149 select ARCH_HAS_PMEM_API 150 150 select ARCH_HAS_PREEMPT_LAZY 151 151 select ARCH_HAS_PTDUMP 152 - select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 153 152 select ARCH_HAS_PTE_SPECIAL 154 153 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 155 154 select ARCH_HAS_SET_MEMORY
-6
arch/powerpc/include/asm/book3s/64/hash-4k.h
··· 168 168 extern int hash__has_transparent_hugepage(void); 169 169 #endif 170 170 171 - static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) 172 - { 173 - BUG(); 174 - return pmd; 175 - } 176 - 177 171 #endif /* !__ASSEMBLY__ */ 178 172 179 173 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
+1 -6
arch/powerpc/include/asm/book3s/64/hash-64k.h
··· 259 259 */ 260 260 static inline int hash__pmd_trans_huge(pmd_t pmd) 261 261 { 262 - return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) == 262 + return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) == 263 263 (_PAGE_PTE | H_PAGE_THP_HUGE)); 264 264 } 265 265 ··· 280 280 unsigned long addr, pmd_t *pmdp); 281 281 extern int hash__has_transparent_hugepage(void); 282 282 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 283 - 284 - static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) 285 - { 286 - return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)); 287 - } 288 283 289 284 #endif /* __ASSEMBLY__ */ 290 285
+2 -51
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 88 88 89 89 #define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */ 90 90 #define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */ 91 - #define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */ 92 91 93 92 /* 94 93 * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE ··· 108 109 */ 109 110 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ 110 111 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \ 111 - _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) 112 + _PAGE_SOFT_DIRTY) 112 113 /* 113 114 * user access blocked by key 114 115 */ ··· 122 123 */ 123 124 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ 124 125 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \ 125 - _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) 126 + _PAGE_SOFT_DIRTY) 126 127 127 128 /* 128 129 * We define 2 sets of base prot bits, one for basic pages (ie, ··· 606 607 static inline pte_t pte_mkhuge(pte_t pte) 607 608 { 608 609 return pte; 609 - } 610 - 611 - static inline pte_t pte_mkdevmap(pte_t pte) 612 - { 613 - return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP)); 614 - } 615 - 616 - /* 617 - * This is potentially called with a pmd as the argument, in which case it's not 618 - * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set. 619 - * That's because the bit we use for _PAGE_DEVMAP is not reserved for software 620 - * use in page directory entries (ie. non-ptes). 
621 - */ 622 - static inline int pte_devmap(pte_t pte) 623 - { 624 - __be64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE); 625 - 626 - return (pte_raw(pte) & mask) == mask; 627 610 } 628 611 629 612 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) ··· 1360 1379 } 1361 1380 extern void serialize_against_pte_lookup(struct mm_struct *mm); 1362 1381 1363 - 1364 - static inline pmd_t pmd_mkdevmap(pmd_t pmd) 1365 - { 1366 - if (radix_enabled()) 1367 - return radix__pmd_mkdevmap(pmd); 1368 - return hash__pmd_mkdevmap(pmd); 1369 - } 1370 - 1371 - static inline pud_t pud_mkdevmap(pud_t pud) 1372 - { 1373 - if (radix_enabled()) 1374 - return radix__pud_mkdevmap(pud); 1375 - BUG(); 1376 - return pud; 1377 - } 1378 - 1379 - static inline int pmd_devmap(pmd_t pmd) 1380 - { 1381 - return pte_devmap(pmd_pte(pmd)); 1382 - } 1383 - 1384 - static inline int pud_devmap(pud_t pud) 1385 - { 1386 - return pte_devmap(pud_pte(pud)); 1387 - } 1388 - 1389 - static inline int pgd_devmap(pgd_t pgd) 1390 - { 1391 - return 0; 1392 - } 1393 1382 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1394 1383 1395 1384 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+2 -12
arch/powerpc/include/asm/book3s/64/radix.h
··· 264 264 265 265 static inline int radix__pmd_trans_huge(pmd_t pmd) 266 266 { 267 - return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE; 267 + return (pmd_val(pmd) & _PAGE_PTE) == _PAGE_PTE; 268 268 } 269 269 270 270 static inline pmd_t radix__pmd_mkhuge(pmd_t pmd) ··· 274 274 275 275 static inline int radix__pud_trans_huge(pud_t pud) 276 276 { 277 - return (pud_val(pud) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE; 277 + return (pud_val(pud) & _PAGE_PTE) == _PAGE_PTE; 278 278 } 279 279 280 280 static inline pud_t radix__pud_mkhuge(pud_t pud) ··· 314 314 return 0; 315 315 } 316 316 #endif 317 - 318 - static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd) 319 - { 320 - return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); 321 - } 322 - 323 - static inline pud_t radix__pud_mkdevmap(pud_t pud) 324 - { 325 - return __pud(pud_val(pud) | (_PAGE_PTE | _PAGE_DEVMAP)); 326 - } 327 317 328 318 struct vmem_altmap; 329 319 struct dev_pagemap;
-1
arch/riscv/Kconfig
··· 46 46 select ARCH_HAS_PREEMPT_LAZY 47 47 select ARCH_HAS_PREPARE_SYNC_CORE_CMD 48 48 select ARCH_HAS_PTDUMP if MMU 49 - select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU 50 49 select ARCH_HAS_PTE_SPECIAL 51 50 select ARCH_HAS_SET_DIRECT_MAP if MMU 52 51 select ARCH_HAS_SET_MEMORY if MMU
-16
arch/riscv/include/asm/pgtable-64.h
··· 397 397 p4d_t *p4d_offset(pgd_t *pgd, unsigned long address); 398 398 399 399 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 400 - static inline int pte_devmap(pte_t pte); 401 400 static inline pte_t pmd_pte(pmd_t pmd); 402 401 static inline pte_t pud_pte(pud_t pud); 403 - 404 - static inline int pmd_devmap(pmd_t pmd) 405 - { 406 - return pte_devmap(pmd_pte(pmd)); 407 - } 408 - 409 - static inline int pud_devmap(pud_t pud) 410 - { 411 - return pte_devmap(pud_pte(pud)); 412 - } 413 - 414 - static inline int pgd_devmap(pgd_t pgd) 415 - { 416 - return 0; 417 - } 418 402 #endif 419 403 420 404 #endif /* _ASM_RISCV_PGTABLE_64_H */
-1
arch/riscv/include/asm/pgtable-bits.h
··· 19 19 #define _PAGE_SOFT (3 << 8) /* Reserved for software */ 20 20 21 21 #define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */ 22 - #define _PAGE_DEVMAP (1 << 9) /* RSW, devmap */ 23 22 #define _PAGE_TABLE _PAGE_PRESENT 24 23 25 24 /*
-22
arch/riscv/include/asm/pgtable.h
··· 409 409 return pte_val(pte) & _PAGE_SPECIAL; 410 410 } 411 411 412 - #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP 413 - static inline int pte_devmap(pte_t pte) 414 - { 415 - return pte_val(pte) & _PAGE_DEVMAP; 416 - } 417 - #endif 418 - 419 412 /* static inline pte_t pte_rdprotect(pte_t pte) */ 420 413 421 414 static inline pte_t pte_wrprotect(pte_t pte) ··· 448 455 static inline pte_t pte_mkspecial(pte_t pte) 449 456 { 450 457 return __pte(pte_val(pte) | _PAGE_SPECIAL); 451 - } 452 - 453 - static inline pte_t pte_mkdevmap(pte_t pte) 454 - { 455 - return __pte(pte_val(pte) | _PAGE_DEVMAP); 456 458 } 457 459 458 460 static inline pte_t pte_mkhuge(pte_t pte) ··· 778 790 return pte_pmd(pte_mkdirty(pmd_pte(pmd))); 779 791 } 780 792 781 - static inline pmd_t pmd_mkdevmap(pmd_t pmd) 782 - { 783 - return pte_pmd(pte_mkdevmap(pmd_pte(pmd))); 784 - } 785 - 786 793 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP 787 794 static inline bool pmd_special(pmd_t pmd) 788 795 { ··· 927 944 static inline pud_t pud_mkhuge(pud_t pud) 928 945 { 929 946 return pud; 930 - } 931 - 932 - static inline pud_t pud_mkdevmap(pud_t pud) 933 - { 934 - return pte_pud(pte_mkdevmap(pud_pte(pud))); 935 947 } 936 948 937 949 static inline int pudp_set_access_flags(struct vm_area_struct *vma,
-1
arch/x86/Kconfig
··· 101 101 select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 102 102 select ARCH_HAS_PMEM_API if X86_64 103 103 select ARCH_HAS_PREEMPT_LAZY 104 - select ARCH_HAS_PTE_DEVMAP if X86_64 105 104 select ARCH_HAS_PTE_SPECIAL 106 105 select ARCH_HAS_HW_PTE_YOUNG 107 106 select ARCH_HAS_NONLEAF_PMD_YOUNG if PGTABLE_LEVELS > 2
+2 -49
arch/x86/include/asm/pgtable.h
··· 301 301 } 302 302 303 303 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 304 - /* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */ 305 304 static inline int pmd_trans_huge(pmd_t pmd) 306 305 { 307 - return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; 306 + return (pmd_val(pmd) & _PAGE_PSE) == _PAGE_PSE; 308 307 } 309 308 310 309 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 311 310 static inline int pud_trans_huge(pud_t pud) 312 311 { 313 - return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; 312 + return (pud_val(pud) & _PAGE_PSE) == _PAGE_PSE; 314 313 } 315 314 #endif 316 315 ··· 318 319 { 319 320 return boot_cpu_has(X86_FEATURE_PSE); 320 321 } 321 - 322 - #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP 323 - static inline int pmd_devmap(pmd_t pmd) 324 - { 325 - return !!(pmd_val(pmd) & _PAGE_DEVMAP); 326 - } 327 - 328 - #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 329 - static inline int pud_devmap(pud_t pud) 330 - { 331 - return !!(pud_val(pud) & _PAGE_DEVMAP); 332 - } 333 - #else 334 - static inline int pud_devmap(pud_t pud) 335 - { 336 - return 0; 337 - } 338 - #endif 339 322 340 323 #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP 341 324 static inline bool pmd_special(pmd_t pmd) ··· 342 361 return pud_set_flags(pud, _PAGE_SPECIAL); 343 362 } 344 363 #endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */ 345 - 346 - static inline int pgd_devmap(pgd_t pgd) 347 - { 348 - return 0; 349 - } 350 - #endif 351 364 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 352 365 353 366 static inline pte_t pte_set_flags(pte_t pte, pteval_t set) ··· 502 527 return pte_set_flags(pte, _PAGE_SPECIAL); 503 528 } 504 529 505 - static inline pte_t pte_mkdevmap(pte_t pte) 506 - { 507 - return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP); 508 - } 509 - 510 530 /* See comments above mksaveddirty_shift() */ 511 531 static inline pmd_t pmd_mksaveddirty(pmd_t pmd) 512 532 { ··· 573 603 return pmd_set_flags(pmd, _PAGE_DIRTY); 574 604 } 575 605 576 - static inline pmd_t 
pmd_mkdevmap(pmd_t pmd) 577 - { 578 - return pmd_set_flags(pmd, _PAGE_DEVMAP); 579 - } 580 - 581 606 static inline pmd_t pmd_mkhuge(pmd_t pmd) 582 607 { 583 608 return pmd_set_flags(pmd, _PAGE_PSE); ··· 636 671 pud = pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); 637 672 638 673 return pud_mksaveddirty(pud); 639 - } 640 - 641 - static inline pud_t pud_mkdevmap(pud_t pud) 642 - { 643 - return pud_set_flags(pud, _PAGE_DEVMAP); 644 674 } 645 675 646 676 static inline pud_t pud_mkhuge(pud_t pud) ··· 967 1007 { 968 1008 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); 969 1009 } 970 - 971 - #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP 972 - static inline int pte_devmap(pte_t a) 973 - { 974 - return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; 975 - } 976 - #endif 977 1010 978 1011 #define pte_accessible pte_accessible 979 1012 static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
+1 -4
arch/x86/include/asm/pgtable_types.h
··· 34 34 #define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */ 35 35 #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ 36 36 #define _PAGE_BIT_KERNEL_4K _PAGE_BIT_SOFTW3 /* page must not be converted to large */ 37 - #define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 38 37 39 38 #ifdef CONFIG_X86_64 40 39 #define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */ ··· 120 121 121 122 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 122 123 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) 123 - #define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP) 124 124 #define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4) 125 125 #else 126 126 #define _PAGE_NX (_AT(pteval_t, 0)) 127 - #define _PAGE_DEVMAP (_AT(pteval_t, 0)) 128 127 #define _PAGE_SOFTW4 (_AT(pteval_t, 0)) 129 128 #endif 130 129 ··· 151 154 #define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ 152 155 _PAGE_SPECIAL | _PAGE_ACCESSED | \ 153 156 _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \ 154 - _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP) 157 + _PAGE_CC | _PAGE_UFFD_WP) 155 158 #define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT) 156 159 #define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE) 157 160
-7
include/linux/mm.h
··· 2704 2704 } 2705 2705 #endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */ 2706 2706 2707 - #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP 2708 - static inline int pte_devmap(pte_t pte) 2709 - { 2710 - return 0; 2711 - } 2712 - #endif 2713 - 2714 2707 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 2715 2708 spinlock_t **ptl); 2716 2709 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+2 -17
include/linux/pgtable.h
··· 1643 1643 } 1644 1644 #endif /* pud_write */ 1645 1645 1646 - #if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) 1647 - static inline int pmd_devmap(pmd_t pmd) 1648 - { 1649 - return 0; 1650 - } 1651 - static inline int pud_devmap(pud_t pud) 1652 - { 1653 - return 0; 1654 - } 1655 - static inline int pgd_devmap(pgd_t pgd) 1656 - { 1657 - return 0; 1658 - } 1659 - #endif 1660 - 1661 1646 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ 1662 1647 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 1663 1648 static inline int pud_trans_huge(pud_t pud) ··· 1897 1912 * - It should contain a huge PFN, which points to a huge page larger than 1898 1913 * PAGE_SIZE of the platform. The PFN format isn't important here. 1899 1914 * 1900 - * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(), 1901 - * pXd_devmap(), or hugetlb mappings). 1915 + * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge() 1916 + * or hugetlb mappings). 1902 1917 */ 1903 1918 #ifndef pgd_leaf 1904 1919 #define pgd_leaf(x) false
-4
mm/Kconfig
··· 1117 1117 register alias named "current_stack_pointer", this config can be 1118 1118 selected. 1119 1119 1120 - config ARCH_HAS_PTE_DEVMAP 1121 - bool 1122 - 1123 1120 config ARCH_HAS_ZONE_DMA_SET 1124 1121 bool 1125 1122 ··· 1134 1137 depends on MEMORY_HOTPLUG 1135 1138 depends on MEMORY_HOTREMOVE 1136 1139 depends on SPARSEMEM_VMEMMAP 1137 - depends on ARCH_HAS_PTE_DEVMAP 1138 1140 select XARRAY_MULTI 1139 1141 1140 1142 help
-59
mm/debug_vm_pgtable.c
··· 348 348 vaddr &= HPAGE_PUD_MASK; 349 349 350 350 pud = pfn_pud(args->pud_pfn, args->page_prot); 351 - /* 352 - * Some architectures have debug checks to make sure 353 - * huge pud mapping are only found with devmap entries 354 - * For now test with only devmap entries. 355 - */ 356 - pud = pud_mkdevmap(pud); 357 351 set_pud_at(args->mm, vaddr, args->pudp, pud); 358 352 flush_dcache_page(page); 359 353 pudp_set_wrprotect(args->mm, vaddr, args->pudp); ··· 360 366 WARN_ON(!pud_none(pud)); 361 367 #endif /* __PAGETABLE_PMD_FOLDED */ 362 368 pud = pfn_pud(args->pud_pfn, args->page_prot); 363 - pud = pud_mkdevmap(pud); 364 369 pud = pud_wrprotect(pud); 365 370 pud = pud_mkclean(pud); 366 371 set_pud_at(args->mm, vaddr, args->pudp, pud); ··· 377 384 #endif /* __PAGETABLE_PMD_FOLDED */ 378 385 379 386 pud = pfn_pud(args->pud_pfn, args->page_prot); 380 - pud = pud_mkdevmap(pud); 381 387 pud = pud_mkyoung(pud); 382 388 set_pud_at(args->mm, vaddr, args->pudp, pud); 383 389 flush_dcache_page(page); ··· 684 692 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 685 693 static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { } 686 694 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 687 - 688 - #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP 689 - static void __init pte_devmap_tests(struct pgtable_debug_args *args) 690 - { 691 - pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); 692 - 693 - pr_debug("Validating PTE devmap\n"); 694 - WARN_ON(!pte_devmap(pte_mkdevmap(pte))); 695 - } 696 - 697 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 698 - static void __init pmd_devmap_tests(struct pgtable_debug_args *args) 699 - { 700 - pmd_t pmd; 701 - 702 - if (!has_transparent_hugepage()) 703 - return; 704 - 705 - pr_debug("Validating PMD devmap\n"); 706 - pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); 707 - WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd))); 708 - } 709 - 710 - #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 711 - static void __init pud_devmap_tests(struct pgtable_debug_args *args) 712 - 
{ 713 - pud_t pud; 714 - 715 - if (!has_transparent_pud_hugepage()) 716 - return; 717 - 718 - pr_debug("Validating PUD devmap\n"); 719 - pud = pfn_pud(args->fixed_pud_pfn, args->page_prot); 720 - WARN_ON(!pud_devmap(pud_mkdevmap(pud))); 721 - } 722 - #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 723 - static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } 724 - #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 725 - #else /* CONFIG_TRANSPARENT_HUGEPAGE */ 726 - static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { } 727 - static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } 728 - #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 729 - #else 730 - static void __init pte_devmap_tests(struct pgtable_debug_args *args) { } 731 - static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { } 732 - static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } 733 - #endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */ 734 695 735 696 static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args) 736 697 { ··· 1277 1332 pte_special_tests(&args); 1278 1333 pte_protnone_tests(&args); 1279 1334 pmd_protnone_tests(&args); 1280 - 1281 - pte_devmap_tests(&args); 1282 - pmd_devmap_tests(&args); 1283 - pud_devmap_tests(&args); 1284 1335 1285 1336 pte_soft_dirty_tests(&args); 1286 1337 pmd_soft_dirty_tests(&args);
+1 -2
mm/hmm.c
··· 405 405 return 0; 406 406 } 407 407 408 - #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \ 409 - defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 408 + #if defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 410 409 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range, 411 410 pud_t pud) 412 411 {
+4 -4
mm/madvise.c
··· 1069 1069 pud_t pudval = pudp_get(pud); 1070 1070 1071 1071 /* If huge return >0 so we abort the operation + zap. */ 1072 - return pud_trans_huge(pudval) || pud_devmap(pudval); 1072 + return pud_trans_huge(pudval); 1073 1073 } 1074 1074 1075 1075 static int guard_install_pmd_entry(pmd_t *pmd, unsigned long addr, ··· 1078 1078 pmd_t pmdval = pmdp_get(pmd); 1079 1079 1080 1080 /* If huge return >0 so we abort the operation + zap. */ 1081 - return pmd_trans_huge(pmdval) || pmd_devmap(pmdval); 1081 + return pmd_trans_huge(pmdval); 1082 1082 } 1083 1083 1084 1084 static int guard_install_pte_entry(pte_t *pte, unsigned long addr, ··· 1189 1189 pud_t pudval = pudp_get(pud); 1190 1190 1191 1191 /* If huge, cannot have guard pages present, so no-op - skip. */ 1192 - if (pud_trans_huge(pudval) || pud_devmap(pudval)) 1192 + if (pud_trans_huge(pudval)) 1193 1193 walk->action = ACTION_CONTINUE; 1194 1194 1195 1195 return 0; ··· 1201 1201 pmd_t pmdval = pmdp_get(pmd); 1202 1202 1203 1203 /* If huge, cannot have guard pages present, so no-op - skip. */ 1204 - if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) 1204 + if (pmd_trans_huge(pmdval)) 1205 1205 walk->action = ACTION_CONTINUE; 1206 1206 1207 1207 return 0;