Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
pgtable-ppc64.h at v3.14-rc1 (576 lines, 17 kB)

#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
#define _ASM_POWERPC_PGTABLE_PPC64_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-ppc64-64k.h>
#else
#include <asm/pgtable-ppc64-4k.h>
#endif
#include <asm/barrier.h>

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
				 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE		(ASM_CONST(1) << PGTABLE_EADDR_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_CACHE_INDEX		(PMD_INDEX_SIZE + 1)
#else
#define PMD_CACHE_INDEX		PMD_INDEX_SIZE
#endif

/*
 * Define the address range of the kernel non-linear virtual area
 */
#ifdef CONFIG_PPC_BOOK3E
#define KERN_VIRT_START		ASM_CONST(0x8000000000000000)
#else
#define KERN_VIRT_START		ASM_CONST(0xD000000000000000)
#endif
#define KERN_VIRT_SIZE		ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START		KERN_VIRT_START
#ifdef CONFIG_PPC_BOOK3E
#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 2)
#else
#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 1)
#endif
#define VMALLOC_END		(VMALLOC_START + VMALLOC_SIZE)

/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space:
 *
 *  ISA_IO_BASE  = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE  = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START		(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE		0x80000000ul
#define ISA_IO_BASE		(KERN_IO_START)
#define ISA_IO_END		(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE		(ISA_IO_END)
#define PHB_IO_END		(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE		(PHB_IO_END)
#define IOREMAP_END		(KERN_VIRT_START + KERN_VIRT_SIZE)
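
/*
 * Worked example (added for illustration, derived from the constants
 * above; not part of the upstream header): with the Book3S server values
 * KERN_VIRT_START = 0xD000000000000000 and KERN_VIRT_SIZE =
 * 0x0000100000000000, the layout resolves to
 *
 *   VMALLOC_START = 0xD000000000000000
 *   VMALLOC_END   = 0xD000080000000000  (vmalloc gets half the region)
 *   KERN_IO_START = 0xD000080000000000
 *   ISA_IO_END    = 0xD000080000010000  (64K of legacy ISA space)
 *   PHB_IO_END    = 0xD000080080000000  (2G of PHB IO space)
 *   IOREMAP_BASE  = 0xD000080080000000
 *   IOREMAP_END   = 0xD000100000000000
 */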

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
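
/*
 * For illustration (not part of the upstream header): REGION_ID() is just
 * the top four bits of an effective address. Assuming the usual server
 * constants (PAGE_OFFSET = 0xC000000000000000), the IDs come out as
 *
 *   USER_REGION_ID    = 0x0  (user addresses)
 *   KERNEL_REGION_ID  = 0xc  (linear mapping)
 *   VMALLOC_REGION_ID = 0xd  (vmalloc/ioremap)
 *   VMEMMAP_REGION_ID = 0xf  (virtual memmap)
 */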

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs and after the vmalloc space on Book3E
 */
#ifdef CONFIG_PPC_BOOK3E
#define VMEMMAP_BASE		VMALLOC_END
#define VMEMMAP_END		KERN_IO_START
#else
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
#endif
#define vmemmap			((struct page *)VMEMMAP_BASE)


/*
 * Include the PTE bits definitions
 */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/pte-hash64.h>
#else
#include <asm/pte-book3e.h>
#endif
#include <asm/pte-common.h>

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of the various PTE accessors; it is
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

#ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
#endif

#endif /* __real_pte */


/* pte_clear moved to later in this file */

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)
#define pud_page(pud)		virt_to_page(pud_page_vaddr(pud))

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_index(address)	(((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       int huge)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
	: "cc" );
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old & ~clr);
#endif
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

#ifdef CONFIG_PPC_STD_MMU_64
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
#endif

	return old;
}
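
/*
 * Sketch of the control flow above, added for illustration (assuming the
 * PTE_ATOMIC_UPDATES path; not part of the upstream header). The
 * ldarx/stdcx. pair is a load-reserve/store-conditional loop, roughly:
 *
 *	do {
 *		old = *ptep;		// ldarx: load PTE and take reservation
 *	} while (old & _PAGE_BUSY);	// andi./bne-: spin while hash code
 *					// holds the PTE busy
 *	tmp = old & ~clr;		// andc: clear the requested bits
 *	// stdcx. stores tmp only if the reservation still holds;
 *	// bne- restarts at "1:" if another CPU touched the PTE.
 *
 * This way racing pte_update() callers and the hash-table code cannot
 * lose each other's bit changes.
 */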

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 1);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this, but for the moment we
 * override these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0);
}


/* Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old | bits);
#endif
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
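
/*
 * Worked example (added for illustration, derived from the macros above;
 * not part of the upstream header): a swap entry packs a 6-bit type at
 * bit 1 and the offset from bit 8, so
 *
 *   __swp_entry(3, 100).val = (3 << 1) | (100 << 8) = 0x6406
 *   __swp_type(entry)       = (0x6406 >> 1) & 0x3f  = 3
 *   __swp_offset(entry)     = 0x6406 >> 8           = 100
 *
 * and __swp_entry_to_pte() shifts the whole value up by PTE_RPN_SHIFT,
 * keeping it clear of the low PTE status bits.
 */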

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */

/*
 * THP pages can't be special, so we use the _PAGE_SPECIAL bit
 */
#define _PAGE_SPLITTING _PAGE_SPECIAL

/*
 * We need to differentiate between explicit huge pages and THP huge
 * pages, since THP huge pages also need to track real subpage details
 */
#define _PAGE_THP_HUGE  _PAGE_4K_PFN

/*
 * set of bits not changed in pmd_modify.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |		\
			 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
			 _PAGE_THP_HUGE)

#ifndef __ASSEMBLY__
/*
 * The linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t. The stashed pgtable_t contains the
 * hpte bits, one byte per HPTE entry, laid out as
 * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000 ]. With a 16MB
 * hugepage and 64K HPTEs we need 256 entries, and with 4K HPTEs we need
 * 4096 entries; both fit in a 4K pgtable_t.
 *
 * The last three bits are intentionally left as zero. These memory
 * locations are also used as normal page PTE pointers, so if we have any
 * pointers left around while we collapse a hugepage, we need to make sure
 * the _PAGE_PRESENT and _PAGE_FILE bits of those are zero when we look at
 * them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return (hpte_slot_array[index] >> 3) & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 4;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
}
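
/*
 * Worked example of the helpers above (added for illustration; not part
 * of the upstream header): recording hash slot 5 for subpage 2 with
 * mark_hpte_slot_valid(arr, 2, 5) writes
 *
 *   arr[2] = (5 << 4) | (1 << 3) = 0x58
 *
 * after which hpte_valid(arr, 2) = (0x58 >> 3) & 1 = 1 and
 * hpte_hash_index(arr, 2) = 0x58 >> 4 = 5. The low three bits stay zero,
 * so per the comment above a stale PTE pointer reading this byte sees
 * _PAGE_PRESENT and _PAGE_FILE clear.
 */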

struct page *realmode_pfn_to_page(unsigned long pfn);

static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hindex is stored in the pgtable whose address is in the
	 * second half of the PMD
	 *
	 * Order this load with the test for pmd_trans_huge in the caller
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);

static inline int pmd_trans_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
}

static inline int pmd_large(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	if (pmd_trans_huge(pmd))
		return pmd_val(pmd) & _PAGE_PRESENT;
	return 0;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pmd_val(pmd) & _PAGE_SPLITTING;
	return 0;
}

extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	/* Do nothing, mk_pmd() does this part. */
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_PRESENT;
	return pmd;
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;
	return pmd;
}

#define __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
					 unsigned long addr,
					 pmd_t *pmdp, unsigned long clr);

static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pmd_t *pmdp)
{
	unsigned long old;

	if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
	return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
			      pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{
	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
		return;

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use the pgtable to store per-pmd specific
	 * information, so when we switch the pmd we should also withdraw
	 * and deposit the pgtable.
	 */
	return true;
}

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */