Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v3.17-rc2
#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
#define _ASM_POWERPC_PGTABLE_PPC64_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-ppc64-64k.h>
#else
#include <asm/pgtable-ppc64-4k.h>
#endif
#include <asm/barrier.h>

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
				 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE		(ASM_CONST(1) << PGTABLE_EADDR_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_CACHE_INDEX		(PMD_INDEX_SIZE + 1)
#else
#define PMD_CACHE_INDEX		PMD_INDEX_SIZE
#endif
/*
 * Define the address range of the kernel non-linear virtual area
 */

#ifdef CONFIG_PPC_BOOK3E
#define KERN_VIRT_START	ASM_CONST(0x8000000000000000)
#else
#define KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#endif
#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START	KERN_VIRT_START
#ifdef CONFIG_PPC_BOOK3E
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
#else
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#endif
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)


/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs and after the vmalloc space on Book3E
 */
#ifdef CONFIG_PPC_BOOK3E
#define VMEMMAP_BASE		VMALLOC_END
#define VMEMMAP_END		KERN_IO_START
#else
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
#endif
#define vmemmap			((struct page *)VMEMMAP_BASE)
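/*
 * For reference, a worked example of where the constants above land on
 * the hash MMU (Book3S, non-Book3E) side — these values follow directly
 * from the arithmetic of the definitions:
 *
 *	VMALLOC_START = 0xD000000000000000
 *	VMALLOC_END   = 0xD000080000000000  (start + KERN_VIRT_SIZE / 2)
 *	KERN_IO_START = 0xD000080000000000
 *	ISA_IO_BASE   = 0xD000080000000000, ISA_IO_END = base + 64K
 *	PHB_IO_BASE   = 0xD000080000010000, PHB_IO_END = KERN_IO_START + 2G
 *	IOREMAP_BASE  = 0xD000080080000000
 *	IOREMAP_END   = 0xD000100000000000  (KERN_VIRT_START + KERN_VIRT_SIZE)
 */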
/*
 * Include the PTE bits definitions
 */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/pte-hash64.h>
#else
#include <asm/pte-book3e.h>
#endif
#include <asm/pte-common.h>

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors; it is
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

#ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
#endif

#endif /* __real_pte */


/* pte_clear moved to later in this file */

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)
#define pud_page(pud)		virt_to_page(pud_page_vaddr(pud))

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set,
				       int huge)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
	: "cc" );
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte((old & ~clr) | set);
#endif
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

#ifdef CONFIG_PPC_STD_MMU_64
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
#endif

	return old;
}
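/*
 * A rough C-level sketch of what the PTE_ATOMIC_UPDATES ldarx/stdcx.
 * loop above does (illustrative only; store_conditional stands in for
 * the stdcx./bne- pair):
 *
 *	do {
 *		old = *ptep;			// ldarx, takes reservation
 *		if (old & _PAGE_BUSY)
 *			continue;		// andi. + bne-: wait out busy bit
 *		new = (old & ~clr) | set;	// andc + or
 *	} while (!store_conditional(ptep, new));
 *
 * i.e. the same read-modify-write as the non-atomic #else branch, but
 * retried until it completes without interference.
 */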
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.  The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}


/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old | bits);
#endif
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
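/*
 * A quick sketch of the swap-entry layout implied by the macros above:
 * bit 0 is left clear (so the entry cannot be mistaken for a present
 * PTE), bits 1-6 hold the swap type, and bits 8 and up hold the offset.
 * For example, __swp_entry(3, 0x100) yields val = (3 << 1) | (0x100 << 8)
 * = 0x10006, from which __swp_type() recovers 3 and __swp_offset()
 * recovers 0x100.
 */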
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */

/*
 * THP pages can't be special, so we reuse _PAGE_SPECIAL
 */
#define _PAGE_SPLITTING _PAGE_SPECIAL

/*
 * We need to differentiate between explicit huge page and THP huge
 * page, since a THP huge page also needs to track real subpage details
 */
#define _PAGE_THP_HUGE  _PAGE_4K_PFN

/*
 * set of bits not changed in pmd_modify.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |	\
			 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
			 _PAGE_THP_HUGE)

#ifndef __ASSEMBLY__
/*
 * The linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t.  The stashed pgtable_t contains the
 * hpte bits, [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000 ], using
 * one byte per HPTE entry.  With a 16MB hugepage and 64K HPTEs we need
 * 256 entries, and with 4K HPTEs we need 4096 entries.  Both fit in a
 * 4K pgtable_t.
 *
 * The last three bits are intentionally left as zero.  These memory
 * locations are also used as normal page PTE pointers, so if we have
 * any pointers left around while we collapse a hugepage, we need to
 * make sure the _PAGE_PRESENT and _PAGE_FILE bits of those are zero
 * when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return (hpte_slot_array[index] >> 3) & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 4;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
}
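/*
 * A worked example of the slot-array byte encoding, using a
 * hypothetical hidx value: mark_hpte_slot_valid(arr, i, 0xA) stores
 * (0xA << 4) | (1 << 3) = 0xA8 in arr[i].  hpte_valid(arr, i) then
 * reads back (0xA8 >> 3) & 1 = 1, and hpte_hash_index(arr, i) reads
 * back 0xA8 >> 4 = 0xA (the secondary bit plus the 3-bit hidx).
 */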
struct page *realmode_pfn_to_page(unsigned long pfn);

static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hindex is stored in the pgtable whose address is in the
	 * second half of the PMD
	 *
	 * Order this load with the test for pmd_trans_huge in the caller
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);

static inline int pmd_trans_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
}

static inline int pmd_large(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	if (pmd_trans_huge(pmd))
		return pmd_val(pmd) & _PAGE_PRESENT;
	return 0;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pmd_val(pmd) & _PAGE_SPLITTING;
	return 0;
}

extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
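/*
 * A huge-page PMD has the same layout as a PTE, so the pmd_* accessors
 * below simply convert the PMD value to a pte_t (pmd_pte/pte_pmd) and
 * delegate to the corresponding pte_* helper; for example pmd_young(pmd)
 * is just pte_young(pmd_pte(pmd)).
 */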
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	/* Do nothing, mk_pmd() does this part.  */
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_PRESENT;
	return pmd;
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;
	return pmd;
}

#define __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
					 unsigned long addr,
					 pmd_t *pmdp,
					 unsigned long clr,
					 unsigned long set);

static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pmd_t *pmdp)
{
	unsigned long old;

	if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
	return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
extern pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
			      pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{

	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
		return;

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use the pgtable to store per-pmd specific
	 * information, so when we switch the pmd we should also withdraw
	 * and deposit the pgtable
	 */
	return true;
}

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */