#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <linux/config.h>
#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/spinlock.h>
#include <linux/mm.h>		/* for vm_area_struct */
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/bitops.h>

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			\
	do {					\
		*(pteptr) = (pteval);		\
	} while (0)
#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
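/*
 * Illustrative sketch (editorial addition, not part of the original
 * header; compiled out): how generic mm code typically drives the
 * set_pte_at() hook above when installing a mapping.  The names
 * example_install_pte, my_* are hypothetical, and pfn_pte() is only
 * defined further down in this file.
 */
#if 0
static void example_install_pte(struct mm_struct *my_mm, unsigned long my_addr,
				pte_t *my_ptep, unsigned long my_pfn,
				pgprot_t my_prot)
{
	/* Build a pte from a page frame number and protection bits ... */
	pte_t pte = pfn_pte(my_pfn, my_prot);

	/* ... then let the architecture hook store it; on parisc this is
	 * a plain assignment, other architectures may need extra work. */
	set_pte_at(my_mm, my_addr, my_ptep, pte);
}
#endif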
 /* Note: If you change ISTACK_SIZE, you need to change the corresponding
  * values in vmlinux.lds and vmlinux64.lds (init_istack section). Also,
  * the "order" and size need to agree.
  */

#define ISTACK_SIZE  32768 /* Interrupt Stack Size */
#define ISTACK_ORDER 3

/* This is the size of the initially mapped kernel memory */
#ifdef CONFIG_64BIT
#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
#else
#define KERNEL_INITIAL_ORDER	23	/* 0 to 1<<23 = 8MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
#define PT_NLEVELS	3
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PMD_ORDER	1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
#else
#define PT_NLEVELS	2
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER	PGD_ORDER
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE	(1UL << BITS_PER_PTE)

/* Definitions for 2nd level */
#define pgtable_cache_init()	do { } while (0)

#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#if PT_NLEVELS == 3
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define BITS_PER_PMD	0
#endif
#define PTRS_PER_PMD	(1UL << BITS_PER_PMD)

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PMD_SHIFT + BITS_PER_PMD)
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	(1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD	PTRS_PER_PGD

#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)

#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
extern void *vmalloc_start;
#define PCXL_DMA_MAP_SIZE	(8*1024*1024)
#define VMALLOC_START	((unsigned long)vmalloc_start)
/* this is a fixmap remnant, see fixmap.h */
#define VMALLOC_END	(KERNEL_MAP_END)
#endif
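/*
 * Worked example of the geometry above (an editorial sketch, assuming the
 * common 32-bit configuration with 4kB pages, i.e. PAGE_SHIFT = 12 and
 * BITS_PER_PTE_ENTRY = BITS_PER_PGD_ENTRY = 2 for 4-byte entries, as
 * asm/page.h defines them in this era):
 *
 *   BITS_PER_PTE = 12 - 2 = 10      ->  PTRS_PER_PTE = 1024
 *   PMD_SHIFT    = 12 + 10 = 22     ->  pmd folded: PGDIR_SIZE = 4MB
 *   BITS_PER_PGD = 12 + 1 - 2 = 11  ->  PTRS_PER_PGD = 2048
 *                                       (the pgd spans 1 << PGD_ORDER pages)
 *   MAX_ADDRBITS = 22 + 11 = 33     ->  the bit beyond 32 comes from the
 *                                       two-page pgd; SPACEID_SHIFT = 1
 */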
/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_FILE_BIT	_PAGE_DIRTY_BIT	/* overload this bit */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_FLUSH_BIT    21   /* (0x400) Software: translation valid */
                                /*             for cache flushing only */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - (x))

/* this defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT		xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT		12


/* this is how many bits may be used by the file functions */
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)

#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_FLUSH    (1 << xlate_pabit(_PAGE_FLUSH_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_FILE     (1 << xlate_pabit(_PAGE_FILE_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT   31
#define _PxD_ATTACHED_BIT  30
#define _PxD_VALID_BIT     29

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (8)	/* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
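/*
 * Worked example of the bit translation above (editorial sketch): the
 * *_BIT numbers follow the PA-RISC convention of counting from the left
 * of a 32-bit word, while C shifts count from the right, so xlate_pabit()
 * converts between the two:
 *
 *   _PAGE_READ_BIT = 31 -> xlate_pabit(31) = 0  -> _PAGE_READ = 1 << 0  = 0x001
 *   _PAGE_USER_BIT = 20 -> xlate_pabit(20) = 11 -> _PAGE_USER = 1 << 11 = 0x800
 *
 * which matches the hex values noted beside each bit definition.  It also
 * follows that PTE_SHIFT = 11, so a file offset stored by pgoff_to_pte()
 * sits above all eleven flag bits (0-10) of the pte.
 */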
#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
/* Others seem to make this executable; I don't know if that's correct
   or not.  The stack is mapped this way though, so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_COPY	PAGE_EXECREAD
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY | _PAGE_READ)
#define PAGE_FLUSH	__pgprot(_PAGE_FLUSH)


/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

	 /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX
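/*
 * How the tables above get used (an editorial note, not original to this
 * header): generic mm code builds protection_map[] from __P000..__P111
 * (private mappings) and __S000..__S111 (shared mappings), indexed by the
 * low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags.  So a private
 * PROT_READ|PROT_WRITE mapping looks up __P011, which this port aliases
 * to PAGE_READONLY so that the first write faults and triggers
 * copy-on-write.
 */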

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)	((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#if PT_NLEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
 * the gateway marker */
#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x)	(!pmd_val(x))
#endif
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
#if PT_NLEVELS == 3
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		/* This is the entry pointing to the permanent pmd
		 * attached to the pgd; cannot clear it */
		__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
	else
#endif
		__pmd_val_set(*pmd, 0);
}



#if PT_NLEVELS == 3
#define pgd_page(pgd) ((unsigned long) __va(pgd_address(pgd)))

/* For 64 bit we have three level tables */

#define pgd_none(x)	(!pgd_val(x))
#define pgd_bad(x)	(!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x)	(pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
#if PT_NLEVELS == 3
	if (pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
		/* This is the permanent pmd attached to the pgd; cannot
		 * free it */
		return;
#endif
	__pgd_val_set(*pgd, 0);
}
#else
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd)		{ return 0; }
extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
extern inline int pgd_present(pgd_t pgd)	{ return 1; }
extern inline void pgd_clear(pgd_t *pgdp)	{ }
#endif
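/*
 * Sketch of the pmd/pgd entry packing implied by pmd_address() above
 * (editorial illustration, compiled out): the low PxD_FLAG_SHIFT bits of
 * an entry hold flags and the rest is a physical page-table address
 * pre-shifted right by PxD_VALUE_SHIFT.  A hypothetical packing helper is
 * simply the inverse of pmd_address():
 */
#if 0
/* example_mk_pmd_val is not a kernel interface; `phys' is assumed to be
 * the page-aligned physical address of a pte page. */
static inline unsigned long example_mk_pmd_val(unsigned long phys)
{
	/* Shift the address down to make room for the flag nibble, then
	 * mark the entry valid and present. */
	return (phys >> PxD_VALUE_SHIFT) | PxD_FLAG_VALID | PxD_FLAG_PRESENT;
}
#endif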

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_READ; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
extern inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
extern inline int pte_user(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }

extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_READ; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) |= _PAGE_READ; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

#define pmd_page_kernel(pmd)	((unsigned long) __va(pmd_address(pmd)))

#define __pmd_page(pmd)	((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
((mm)->pgd + ((address) >> PGDIR_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */

#if PT_NLEVELS == 3
#define pmd_offset(dir,address) \
((pmd_t *) pgd_page(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif
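/*
 * Round-trip sketch for the conversion functions above (editorial
 * illustration, compiled out): pfn_pte() places the page frame number
 * above PFN_PTE_SHIFT and the protection bits below it (all pte flag
 * bits fit in bits 0-11), so pte_pfn() recovers the pfn exactly.
 * example_pte_roundtrip is a hypothetical name.
 */
#if 0
static void example_pte_roundtrip(struct page *page)
{
	pte_t pte = mk_pte(page, PAGE_KERNEL);

	/* Both of these hold by construction: */
	BUG_ON(pte_pfn(pte) != page_to_pfn(page));
	BUG_ON(pte_page(pte) != page);
}
#endif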

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_kernel(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */

#define __swp_type(x)                     ((x).val & 0x1f)
#define __swp_offset(x)                   ( (((x).val >> 6) &  0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)         ((swp_entry_t) { (type) | \
					    ((offset &  0x7) << 6) | \
					    ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
	return 1;
#endif
}

extern spinlock_t pa_dbit_lock;

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;
	pte_t pte;

	spin_lock(&pa_dbit_lock);
	pte = old_pte = *ptep;
	pte_val(pte) &= ~_PAGE_PRESENT;
	pte_val(pte) |= _PAGE_FLUSH;
	set_pte_at(mm, addr, ptep, pte);
	spin_unlock(&pa_dbit_lock);

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte(old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

#endif /* !__ASSEMBLY__ */
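/*
 * Worked example of the swap encoding above (editorial sketch): the swap
 * type occupies pte bits 0-4 and the offset is split so that bits 5, 9
 * and 10 stay clear -- those are the _PAGE_FILE, _PAGE_PRESENT and
 * _PAGE_FLUSH positions after xlate_pabit().  For offset = 0x15
 * (binary 10101):
 *
 *   low 3 bits (0x5)          -> shifted left 6 -> land in bits 6-8
 *   rest (0x15 & ~0x7 = 0x10) -> shifted left 8 -> land in bits 11 and up
 *
 * __swp_offset() reverses exactly those shifts, so
 * __swp_offset(__swp_entry(t, off)) == off for any in-range offset.
 */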

/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _PARISC_PGTABLE_H */
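
/*
 * Usage sketch for the I/O helpers above (editorial illustration, compiled
 * out and deliberately outside the include guard): a driver mmap()
 * implementation would typically mark the mapping uncached before handing
 * the pfn range to io_remap_pfn_range().  example_mmap and
 * EXAMPLE_DEVICE_PHYS are hypothetical names.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = EXAMPLE_DEVICE_PHYS >> PAGE_SHIFT;

	/* _PAGE_NO_CACHE makes the mapping uncacheable on parisc */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
#endif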