Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v2.6.13-rc2
#ifndef _PPC64_PGTABLE_H
#define _PPC64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/stddef.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#endif /* __ASSEMBLY__ */

#include <asm-generic/pgtable-nopud.h>

/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD level use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE	9
#define PMD_INDEX_SIZE	10
#define PGD_INDEX_SIZE	10

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
			 PGD_INDEX_SIZE + PAGE_SHIFT)
#define EADDR_MASK	((1UL << EADDR_SIZE) - 1)

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START	(0xD000000000000000ul)
#define VMALLOC_SIZE	(0x10000000000UL)
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
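/*
 * Illustrative sketch, not part of the original source: with 4K pages
 * (PAGE_SHIFT == 12, an assumption about the configured page size) the
 * level sizes above work out as:
 *
 *	PMD_SHIFT   = 12 + 9         = 21  ->  one PMD entry maps 2MB
 *	PGDIR_SHIFT = 21 + 10        = 31  ->  one PGD entry maps 2GB
 *	EADDR_SIZE  = 9 + 10 + 10 + 12 = 41  ->  2TB of mappable EA space
 *
 * A compile-time check of the relationship could look like:
 *
 *	BUILD_BUG_ON(EADDR_SIZE != PGDIR_SHIFT + PGD_INDEX_SIZE);
 */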
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
#define _PAGE_USER	0x0002 /* matches one of the PP bits */
#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED	0x0008
#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
#define _PAGE_DIRTY	0x0080 /* C: page changed */
#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
#define _PAGE_RW	0x0200 /* software: user write access allowed */
#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
#define _PAGE_SECONDARY	0x8000 /* software: HPTE is in secondary group */
#define _PAGE_GROUP_IX	0x7000 /* software: HPTE index within group */
#define _PAGE_HUGE	0x10000 /* 16MB page */
/* Bits 0x7000 identify the index within an HPT Group */
#define _PAGE_HPTEFLAGS	(_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)

/* PAGE_MASK gives the right answer below, but only by accident */
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)

#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)

/* __pgprot defined in asm-ppc64/page.h */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)

#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP

/*
 * This bit in a hardware PTE indicates that the page is *not* executable.
 */
#define HW_NO_EXEC	_PAGE_EXEC

/*
 * POWER4 and newer have per page execute protection, older chips can only
 * do this on a segment (256MB) basis.
 *
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 *
 * Note due to the way vm flags are laid out, the bits are XWR
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X
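/*
 * Illustrative sketch, not part of the original source: generic mm code
 * selects one of the entries above through protection_map[], indexed by
 * the XWR permission bits of a vma's vm_flags.  For a PROT_READ |
 * PROT_WRITE, MAP_SHARED mapping, the read and write bits are set and
 * the mapping is shared, so the lookup resolves as:
 *
 *	pgprot_t prot = __S011;		// i.e. PAGE_SHARED
 *
 * Every writable private combination (__P01x, __P11x) instead yields a
 * PAGE_COPY variant with _PAGE_RW withheld: the first store faults, and
 * the fault handler performs copy-on-write before granting write access.
 */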
#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

/* shift to put page number into pte */
#define PTE_SHIFT	(17)

#ifdef CONFIG_HUGETLB_PAGE

#ifndef __ASSEMBLY__
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local);

void hugetlb_mm_free_pgd(struct mm_struct *mm);
#endif /* __ASSEMBLY__ */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#else

#define hash_huge_page(mm,a,ea,vsid,local)	-1
#define hugetlb_mm_free_pgd(mm)			do {} while (0)

#endif

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte takes a (struct page *) as input
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pmd_set(pmdp, ptep)	(pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) == 0)
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd)	(__bpn_to_ba(pmd_val(pmd)))
#define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))

#define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		((pud_val(pud)) == 0UL)
#define pud_present(pud)	(pud_val(pud) != 0UL)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0UL)
#define pud_page(pud)		(__bpn_to_ba(pud_val(pud)))

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address)	(((address) >> (PGDIR_SHIFT)) & 0x7ff)

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pudp,addr) \
	((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset_kernel(dir,addr) \
	((pte_t *) pmd_page_kernel(*(dir)) \
	 + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)
#define pte_unmap_nested(pte)		do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
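/*
 * Illustrative sketch, not part of the original source: a kernel-space
 * lookup composed from the accessors above.  The names addr, pgd, pud,
 * pmd and pte are arbitrary; find_linux_pte() at the end of this file
 * does the same walk with the none/present checks included:
 *
 *	unsigned long addr = VMALLOC_START;
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);	// from pgtable-nopud.h
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */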
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_HUGE; }

/* Note: these take the pte by value, so they only modify the local copy */
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	pte_val(pte) |= _PAGE_HUGE; return pte; }

/* Atomic PTE updates */
static inline unsigned long pte_update(pte_t *p, unsigned long clr)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
	: "cc" );
	return old;
}

/*
 * PTE updating functions.  hpte_update() only puts the PTE in the
 * batch; it doesn't actually trigger the hash flush immediately.
 * You need to call flush_tlb_pending() to do that.
 */
extern void hpte_update(struct mm_struct *mm, unsigned long addr,
			unsigned long pte, int wrprot);

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_ACCESSED);
	if (old & _PAGE_HASHPTE) {
		hpte_update(mm, addr, old, 0);
		flush_tlb_pending();
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})
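/*
 * Illustrative sketch, not part of the original source: ignoring the
 * _PAGE_BUSY spin, pte_update() above behaves like this C loop, where
 * load_reserved() and store_conditional() are hypothetical stand-ins
 * for the ldarx/stdcx. reservation pair:
 *
 *	unsigned long old, new;
 *	do {
 *		old = load_reserved(p);		// ldarx
 *		new = old & ~clr;		// andc: clear requested bits
 *	} while (!store_conditional(p, new));	// stdcx. fails if raced
 *	return old;
 *
 * The real assembler also re-loads while _PAGE_BUSY is set, so it never
 * modifies a PTE that the hash-fault path currently owns.
 */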
/*
 * On RW/DIRTY bit transitions we can avoid flushing the hpte.  For the
 * moment we always flush, but we need to fix hpte_update and test if
 * the optimisation is worth it.
 */
static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_DIRTY);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
	return (old & _PAGE_DIRTY) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;
	old = pte_update(ptep, _PAGE_RW);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.  The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
						  __ptep);		\
	flush_tlb_page(__vma, __address);				\
	__dirty;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		pte_clear(mm, addr, ptep);
		flush_tlb_pending();
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
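/*
 * Illustrative sketch, not part of the original source: how the
 * primitives above combine when replacing a mapping.  mm, addr, ptep
 * and new_page are arbitrary names:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_pending();	// push out the batched HPTE invalidate
 *	set_pte_at(mm, addr, ptep, mk_pte(new_page, PAGE_SHARED));
 *
 * set_pte_at() itself clears a still-present old PTE and flushes before
 * installing the new value, and it strips _PAGE_HPTEFLAGS so the new
 * linux PTE starts out with no hash-table state.
 */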
/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	or	%0,%3,%0\n\
	stdcx.	%0,0,%4\n\
	bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								\
		__ptep_set_access_flags(__ptep, __entry, __dirty);	\
		flush_tlb_page_nohash(__vma, __address);		\
	} while(0)

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * Because the huge pgtables are only 2 level, they can take
 * at most around 4M, much less than one hugepage which the
 * process is presumably entitled to use.  So we don't bother
 * freeing up the pagetables on unmap, and wait until
 * destroy_context() to clean up the lot.
 */
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	\
	do { } while (0)

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off)	((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)
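/*
 * Illustrative sketch, not part of the original source: a round-trip
 * through the swap encoding with arbitrary numbers.  Because the entry
 * is shifted up by PTE_SHIFT (17) when stored, every flag bit defined
 * above (all below bit 17) stays clear, so a swap PTE is never
 * pte_present():
 *
 *	swp_entry_t e = __swp_entry(3, 100);	// val = (3 << 1) | (100 << 8)
 *	// __swp_type(e) == 3, __swp_offset(e) == 100
 *	pte_t pte = __swp_entry_to_pte(e);	// pte_val == e.val << 17
 *	// pte_present(pte) == 0; __pte_to_swp_entry(pte) recovers e
 */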
/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns zero.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt = NULL;
	pte_t pte;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, ea);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, ea);
			if (pmd_present(*pm)) {
				pt = pte_offset_kernel(pm, ea);
				pte = *pt;
				if (!pte_present(pte))
					pt = NULL;
			}
		}
	}

	return pt;
}

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_PGTABLE_H */
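/*
 * Illustrative sketch, not part of the original source: a caller of
 * find_linux_pte() as defined above.  The choice of init_mm.pgd and the
 * names ea, ptep and pfn are arbitrary; a NULL return means no present
 * translation exists:
 *
 *	pte_t *ptep = find_linux_pte(init_mm.pgd, ea);
 *	if (ptep)
 *		pfn = pte_pfn(*ptep);	// present translation found
 */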