#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>

#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
	CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE. However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(pmdp);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp,
					    int full)
{
	return pmdp_huge_get_and_clear(mm, address, pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif
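
/*
 * Usage sketch (illustrative only): every helper above follows the same
 * override convention. An architecture that can do better than the generic
 * fallback defines the matching __HAVE_ARCH_* symbol (or the macro name
 * itself, for the "#ifndef pmdp_collapse_flush"-style guards below) in its
 * own <asm/pgtable.h>, provides its implementation there, and includes this
 * file last. Roughly, for a hypothetical architecture "foo":
 *
 *	// arch/foo/include/asm/pgtable.h
 *	#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 *	static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 *						    unsigned long address,
 *						    pte_t *ptep)
 *	{
 *		// e.g. atomically test-and-clear the hardware accessed bit
 *	}
 *
 *	#include <asm-generic/pgtable.h>	// picks up the remaining defaults
 */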

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTE's which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif
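
/*
 * Usage sketch (illustrative only, simplified from fault handlers such as
 * do_wp_page()): pte_same() is what lets a fault handler revalidate a pte
 * after dropping and re-taking the pte lock. orig_pte and ptl below stand
 * for the caller's saved pte value and pte lock:
 *
 *	spin_lock(ptl);
 *	if (!pte_same(*ptep, orig_pte)) {
 *		// the pte changed under us (parallel fault, zap, ...);
 *		// back out and let the caller retry
 *		pte_unmap_unlock(ptep, ptl);
 *		return 0;
 *	}
 *	// orig_pte is still current, safe to proceed
 */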

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
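
/*
 * Usage sketch (illustrative only): the p?d_addr_end() macros and the
 * p?d_none_or_clear_bad() helpers above combine into the walking loop used
 * throughout mm/. For one level it looks roughly like:
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	do {
 *		unsigned long next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;	// empty or corrupt entry: skip it
 *		// ... walk the pte level covering [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */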

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time. The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
#endif /* CONFIG_MMU */
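
/*
 * Usage sketch (illustrative only, roughly what mm/mprotect.c does): a
 * protection change under the pte lock brackets the modification with the
 * start/commit pair so no hardware dirty/accessed update is lost:
 *
 *	// caller holds the pte lock covering ptep
 *	pte_t ptent = ptep_modify_prot_start(mm, addr, ptep);
 *	ptent = pte_modify(ptent, newprot);
 *	// ... adjust other software bits in ptent if needed ...
 *	ptep_modify_prot_commit(mm, addr, ptep, ptent);
 */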

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
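
/*
 * Usage sketch (illustrative only): a batched PTE update brackets the writes
 * with the lazy MMU hooks while the page table lock is held, so a paravirt
 * backend may queue the updates and flush them once on leave:
 *
 *	// page table lock already held
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; pte++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, pte, ...);
 *	arch_leave_lazy_mmu_mode();
 */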

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests. By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired. This is for sanity of maintaining and reasoning about the
 * kernel code. In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range() and
 * vm_insert_pfn().
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vm_insert_pfn().
 */
static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				   unsigned long pfn)
{
	return 0;
}

/*
 * track_pfn_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			    unsigned long pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
#endif

#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	/*
	 * Depend on the compiler for an atomic pmd read. NOTE: this is
	 * only going to work if the pmdval_t isn't larger than
	 * an unsigned long.
	 */
	return *pmdp;
}
#endif

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl)
{
	/*
	 * With split pmd lock we also need to move the preallocated
	 * PTE page table if new_pmd is on a different PMD page table.
	 */
	return new_pmd_ptl != old_pmd_ptl;
}
#endif

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a hugepmd or into a regular pmd (if the hugepage allocation
 * fails). While holding the mmap_sem in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd. When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined, so behaving as if the pmd was none is safe (because it
 * can return none anyway). The compiler level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 *
 * For 32bit kernels with a 64bit large pmd_t this automatically takes
 * care of reading the pmd atomically to avoid SMP race conditions
 * against pmd_populate() when the mmap_sem is held for reading by the
 * caller (a special atomic read not done by "gcc" as in the generic
 * version above is also needed when THP is disabled because the page
 * fault can populate the pmd from under us).
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
	 * pmd_read_atomic is allowed to return a not atomic pmdval
	 * (for example pointing to a hugepage that has never been
	 * mapped in the pmd). The below checks will only care about
	 * the low part of the pmd with 32bit PAE x86 anyway, with the
	 * exception of pmd_none(). So the important thing is that if
	 * the low part of the pmd is found null, the high part will
	 * be also null or the pmd_none() check below would be
	 * confused.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel. Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that already verified the pmd is not none and that want to
 * walk ptes while holding the mmap sem in read mode (write mode doesn't
 * need this). If THP is not enabled, the pmd can't go away under the
 * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 * run a pmd_trans_unstable check before walking the ptes after
 * split_huge_page_pmd returns (because it may have run when the pmd
 * became null, but then a page fault can map in a THP and not a
 * regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}
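
/*
 * Usage sketch (illustrative only): a pte walker that holds mmap_sem only
 * for read uses pmd_trans_unstable() as a gate before mapping the pte level,
 * roughly:
 *
 *	// mmap_sem held for read; pmd already verified not none
 *	if (pmd_trans_unstable(pmd))
 *		return 0;	// racing THP fault/MADV_DONTNEED: treat as none
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	for (; addr != end; pte++, addr += PAGE_SIZE) {
 *		// ... examine *pte ...
 *	}
 *	pte_unmap_unlock(pte - 1, ptl);
 */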

#ifndef CONFIG_NUMA_BALANCING
/*
 * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
 * the only case the kernel cares about is NUMA balancing, and the bit is only
 * ever set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not
 * marked _PAGE_PROTNONE, so by default implement the helper as "always no".
 * It is the responsibility of the caller to distinguish between PROT_NONE
 * protections and NUMA hinting fault protections.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif

#endif /* _ASM_GENERIC_PGTABLE_H */