Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v2.6.12-rc2
#ifndef _PPC64_PGTABLE_H
#define _PPC64_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/stddef.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#endif /* __ASSEMBLY__ */

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD level use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE	9
#define PMD_INDEX_SIZE	10
#define PGD_INDEX_SIZE	10

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

#define USER_PTRS_PER_PGD	(1024)
#define FIRST_USER_PGD_NR	0

#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
		    PGD_INDEX_SIZE + PAGE_SHIFT)

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EA_BITS	41
#define PGTABLE_EA_MASK	((1UL<<PGTABLE_EA_BITS)-1)

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START	(0xD000000000000000ul)
#define VMALLOC_END	(VMALLOC_START + PGTABLE_EA_MASK)

/*
 * Define the address range of the imalloc VM area.
 * (used for ioremap)
 */
#define IMALLOC_START	  (ioremap_bot)
#define IMALLOC_VMADDR(x) ((unsigned long)(x))
#define PHBS_IO_BASE	  (0xE000000000000000ul)	/* Reserve 2 gigs for PHBs */
#define IMALLOC_BASE	  (0xE000000080000000ul)
#define IMALLOC_END	  (IMALLOC_BASE + PGTABLE_EA_MASK)

/*
 * Define the user address range
 */
#define USER_START	(0UL)
#define USER_END	(USER_START + PGTABLE_EA_MASK)

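/*
 * Illustrative arithmetic for the geometry above, assuming the 4k page
 * configuration (PAGE_SHIFT = 12) this file targets: PMD_SHIFT is
 * 12 + 12 - 3 = 21, so each PMD entry maps 2MB; PGDIR_SHIFT is
 * 12 + 9 + 10 = 31, so each PGD entry maps 2GB; and EADDR_SIZE is
 * 9 + 10 + 10 + 12 = 41 bits, which is why PGTABLE_EA_BITS is 41.
 */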
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
#define _PAGE_USER	0x0002 /* matches one of the PP bits */
#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED	0x0008
#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
#define _PAGE_DIRTY	0x0080 /* C: page changed */
#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
#define _PAGE_RW	0x0200 /* software: user write access allowed */
#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
#define _PAGE_SECONDARY	0x8000 /* software: HPTE is in secondary group */
#define _PAGE_GROUP_IX	0x7000 /* software: HPTE index within group */
#define _PAGE_HUGE	0x10000 /* 16MB page */
/* Bits 0x7000 identify the index within an HPT Group */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)
/* PAGE_MASK gives the right answer below, but only by accident */
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)

#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)

/* __pgprot defined in asm-ppc64/page.h */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)

#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP

/*
 * This bit in a hardware PTE indicates that the page is *not* executable.
 */
#define HW_NO_EXEC	_PAGE_EXEC

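/*
 * Illustrative expansion of the composites above: _PAGE_BASE is
 * 0x0001 | 0x0100 | 0x0010 = 0x0111, so for example PAGE_SHARED
 * expands to 0x0111 | 0x0200 | 0x0002 = 0x0313 (present, accessed,
 * coherent, writable, user-accessible).
 */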
/*
 * POWER4 and newer have per page execute protection, older chips can only
 * do this on a segment (256MB) basis.
 *
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 *
 * Note due to the way vm flags are laid out, the bits are XWR
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

/* shift to put page number into pte */
#define PTE_SHIFT (17)

/* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
 * to give the PTE page number.  The bottom two bits are for flags. */
#define PMD_TO_PTEPAGE_SHIFT (2)

#ifdef CONFIG_HUGETLB_PAGE

#ifndef __ASSEMBLY__
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local);

void hugetlb_mm_free_pgd(struct mm_struct *mm);
#endif /* __ASSEMBLY__ */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#else

#define hash_huge_page(mm,a,ea,vsid,local)	-1
#define hugetlb_mm_free_pgd(mm)			do {} while (0)

#endif

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte takes a (struct page *) as input
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#define pfn_pte(pfn,pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) |		\
			pgprot_val(pgprot);				\
	pte;								\
})

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pmd_set(pmdp, ptep) 	\
	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) == 0)
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd)	\
	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
#define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
#define pgd_set(pgdp, pmdp)	(pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		((pgd_val(pgd)) == 0)
#define pgd_present(pgd)	(pgd_val(pgd) != 0UL)
#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0UL)
#define pgd_page(pgd)		(__bpn_to_ba(pgd_val(pgd)))

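/*
 * Illustration of the PTE layout implied by pfn_pte()/pte_pfn() above:
 * the flag bits defined earlier occupy bits 0-16 (_PAGE_HUGE, 0x10000,
 * is the highest), so shifting the page number up by PTE_SHIFT (17)
 * keeps it clear of every flag.  E.g. pfn_pte(0x1234, PAGE_KERNEL)
 * yields (0x1234 << 17) with 0x0391 (_PAGE_BASE | _PAGE_WRENABLE)
 * OR-ed into the low bits.
 */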
/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff)

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/* Find an entry in the third-level page table.. */
#define pte_offset_kernel(dir,addr) \
	((pte_t *) pmd_page_kernel(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)		pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)		pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)				do { } while(0)
#define pte_unmap_nested(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in the ioremap page-table-directory */
#define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))

#define pages_to_mb(x)		((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER;}
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC;}
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE;}
static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_HUGE;}

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	pte_val(pte) |= _PAGE_HUGE; return pte; }

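/*
 * Illustrative three-level walk using the offset macros above, for an
 * effective address "ea" in the kernel tables (the same pattern that
 * find_linux_pte() implements at the bottom of this file):
 *
 *	pgd_t *pg = pgd_offset_k(ea);
 *	pmd_t *pm = pmd_offset(pg, ea);
 *	pte_t *pt = pte_offset_kernel(pm, ea);
 *
 * Note that pte_mkdirty() and friends operate on a pte_t value and
 * return the modified value; they do not touch the page tables.
 */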
/* Atomic PTE updates */
static inline unsigned long pte_update(pte_t *p, unsigned long clr)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
	: "cc" );
	return old;
}

/* PTE updating functions.  This function puts the PTE in the batch
 * but doesn't actually trigger the hash flush immediately; you need
 * to call flush_tlb_pending() to do that.
 */
extern void hpte_update(struct mm_struct *mm, unsigned long addr, unsigned long pte,
			int wrprot);

static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_ACCESSED);
	if (old & _PAGE_HASHPTE) {
		hpte_update(mm, addr, old, 0);
		flush_tlb_pending();
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

/*
 * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
 * moment we always flush but we need to fix hpte_update and test if the
 * optimisation is worth it.
 */
static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_DIRTY);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
	return (old & _PAGE_DIRTY) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;
	old = pte_update(ptep, _PAGE_RW);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
}

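/*
 * Sketch of what the ldarx/stdcx. loop in pte_update() above does, in
 * pseudo-C (illustrative only; the real sequence must be atomic):
 *
 *	do {
 *		old = *p;			// ldarx: load and reserve
 *	} while (old & _PAGE_BUSY);		// spin while the hash code owns it
 *	*p = old & ~clr;			// stdcx.: store, retry if reservation lost
 *	return old;
 */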
/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
						  __ptep);		\
	flush_tlb_page(__vma, __address);				\
	__dirty;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		pte_clear(mm, addr, ptep);
		flush_tlb_pending();
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

/* Set the dirty and/or accessed bits atomically in a linux PTE;
 * this function doesn't need to flush the hash entry
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								\
		__ptep_set_access_flags(__ptep, __entry, __dirty);	\
		flush_tlb_page_nohash(__vma, __address);		\
	} while(0)

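/*
 * Pseudo-C sketch of __ptep_set_access_flags() above (illustrative;
 * the real version is atomic via ldarx/stdcx.):
 *
 *	do {
 *		old = *ptep;
 *	} while (old & _PAGE_BUSY);	// wait until the hash code is done
 *	*ptep = old | bits;		// DIRTY/ACCESSED/RW/EXEC from entry
 */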
/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

extern unsigned long ioremap_bot, ioremap_base;

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[1024];
extern pgd_t ioremap_dir[1024];

extern void paging_init(void);

struct mmu_gather;
void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
			   unsigned long start, unsigned long end);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)

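/*
 * Illustration of the swap-entry layout above: within the swp_entry_t
 * value, the swap type sits in bits 1-6 and the offset starts at bit 8.
 * __swp_entry_to_pte() shifts the whole value up by PTE_SHIFT (17), so
 * the resulting PTE has every low flag bit clear -- in particular
 * _PAGE_PRESENT (0x0001) -- which is what distinguishes it from a live
 * translation.  pgoff_to_pte() similarly keeps _PAGE_FILE (0x0002) set
 * but _PAGE_PRESENT clear for nonlinear file mappings.
 */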
/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)			\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

void pgtable_cache_init(void);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);

/* imalloc region types */
#define IM_REGION_UNUSED	0x1
#define IM_REGION_SUBSET	0x2
#define IM_REGION_EXISTS	0x4
#define IM_REGION_OVERLAP	0x8
#define IM_REGION_SUPERSET	0x10

extern struct vm_struct * im_get_free_area(unsigned long size);
extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
				      int region_type);
unsigned long im_free(void *addr);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long va, unsigned long prpn,
				     int secondary, unsigned long hpteflags,
				     int bolted, int large);

extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			       unsigned long prpn, int secondary,
			       unsigned long hpteflags, int bolted, int large);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns zero.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pmd_t *pm;
	pte_t *pt = NULL;
	pte_t pte;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {

		pm = pmd_offset(pg, ea);
		if (pmd_present(*pm)) {
			pt = pte_offset_kernel(pm, ea);
			pte = *pt;
			if (!pte_present(pte))
				pt = NULL;
		}
	}

	return pt;
}

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */

#endif /* _PPC64_PGTABLE_H */