/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* This is for the serialization of PxTLB broadcasts. At least on the N class
 * systems, only one PxTLB inter processor broadcast can be active at any one
 * time on the Merced bus. */
extern spinlock_t pa_tlb_flush_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes	(0)
#endif

#define purge_tlb_start(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags);	\
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context.space_id, SR_TEMP1);
	pdtlb(SR_TEMP1, addr);
	pitlb(SR_TEMP1, addr);
	purge_tlb_end(flags);
}
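/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * purge_tlb_range() helper would take the purge_tlb_start()/purge_tlb_end()
 * bracket once around the whole loop, so an N-class machine acquires the
 * Merced-bus lock a single time rather than once per page:
 *
 *	static inline void purge_tlb_range(struct mm_struct *mm,
 *			unsigned long start, unsigned long end)
 *	{
 *		unsigned long flags, addr;
 *
 *		purge_tlb_start(flags);
 *		mtsp(mm->context.space_id, SR_TEMP1);
 *		for (addr = start; addr < end; addr += PAGE_SIZE) {
 *			pdtlb(SR_TEMP1, addr);
 *			pitlb(SR_TEMP1, addr);
 *		}
 *		purge_tlb_end(flags);
 *	}
 */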
extern void __update_cache(pte_t pte);

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			\
	do {					\
		*(pteptr) = (pteval);		\
		mb();				\
	} while(0)

#define set_pte_at(mm, addr, pteptr, pteval)	\
	do {					\
		if (pte_present(pteval) &&	\
		    pte_user(pteval))		\
			__update_cache(pteval);	\
		*(pteptr) = (pteval);		\
		purge_tlb_entries(mm, addr);	\
	} while (0)

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_TABLE_ORDER	1
#define PGD_TABLE_ORDER	0
#else
#define PGD_TABLE_ORDER	1
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE	(1UL << BITS_PER_PTE)

/* Definitions for 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD	(1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD	0
#endif

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_TABLE_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	(1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD	PTRS_PER_PGD

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1ULL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif
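/*
 * Worked example, not in the original header: for a 32-bit kernel
 * (CONFIG_PGTABLE_LEVELS == 2) with 4 kB pages, and assuming the 4-byte
 * table entries from asm/page.h (BITS_PER_PTE_ENTRY ==
 * BITS_PER_PGD_ENTRY == 2), the macros above work out to:
 *
 *	BITS_PER_PTE = 12 - 2 = 10    -> PTRS_PER_PTE = 1024
 *	BITS_PER_PMD = 0              -> PGDIR_SHIFT = 22, PGDIR_SIZE = 4 MB
 *	22 + 12 + 1 - 2 = 33 > 32     -> BITS_PER_PGD = 32 - 22 = 10
 *	                                 (the overflow clamp above)
 *	MAX_ADDRBITS = BITS_PER_LONG  -> MAX_ADDRESS = 4 GB
 *
 * With pgtable-nopmd.h folding PMD_SHIFT down to PGDIR_SHIFT (22),
 * PT_INITIAL = 1 << (25 - 22) = 8 PTE pages, each mapping 4 MB, which
 * covers the 32 MB of KERNEL_INITIAL_SIZE.
 */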
/*
 * pgd entries used up by user/kernel:
 */

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */
#ifdef CONFIG_HUGETLB_PAGE
#define _PAGE_SPECIAL_BIT  _PAGE_DMB_BIT   /* DMB feature is currently unused */
#else
#define _PAGE_SPECIAL_BIT  _PAGE_HPAGE_BIT /* use unused HUGE PAGE bit */
#endif

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - (x))

/* this defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT	12

#define _PAGE_READ	(1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE	(1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW	(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC	(1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY	(1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB	(1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY	(1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP	(1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE	(1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED	(1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT	(1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE	(1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER	(1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_SPECIAL	(1 << xlate_pabit(_PAGE_SPECIAL_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT	31
#define _PxD_VALID_BIT		30

#define PxD_FLAG_PRESENT	(1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_VALID		(1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK		(0xf)
#define PxD_FLAG_SHIFT		(4)
#define PxD_VALUE_SHIFT		(PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
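/*
 * Worked example, not in the original header: xlate_pabit() converts a
 * PA-RISC bit number (bit 0 is the MSB of the 32-bit word) into an
 * ordinary left-shift count, e.g. _PAGE_READ_BIT == 31 gives
 * _PAGE_READ == 1 << (31 - 31) == 0x001, matching the hex value quoted
 * next to each *_BIT define above.  For the directory entries,
 * PxD_VALUE_SHIFT == 12 - 4 == 8: a page-aligned physical table address
 * is stored right-shifted by 8, with the low four bits reserved for the
 * PxD_FLAG_* meta-information, so a 32-bit pgd/pmd entry can reach
 * 2^(32+8) bytes -- the 40-bit figure (for 4k PAGE_SIZE) in the comment
 * above.
 */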
#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY	PAGE_EXECREAD
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)

/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

	 /*xwr*/

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)	(pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_user(x)	(pte_val(x) & _PAGE_USER)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x)	((unsigned long)(pud_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
	set_pmd(pmd, __pmd(0));
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud)))
#define pud_page(pud)	virt_to_page((void *)pud_pgtable(pud))

/* For 64 bit we have three level tables */

#define pud_none(x)	(!pud_val(x))
#define pud_bad(x)	(!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x)	(pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud) {
	set_pud(pud, __pud(0));
}
#endif
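/*
 * Worked decode, not in the original header, for a hypothetical table at
 * physical address 0x12345000: the directory entry would hold
 * (0x12345000 >> PxD_VALUE_SHIFT) | PxD_FLAG_PRESENT | PxD_FLAG_VALID
 * == 0x123450 | 0x1 | 0x2 == 0x123453, and pmd_address() recovers
 * (0x123453 & ~0xf) << 8 == 0x12345000 again.
 */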
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)   (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
			 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)   (0)
#define pte_mkhuge(pte) (pte)
#endif


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_address(pmd)));
}

#define pmd_pfn(pmd)	(pmd_address(pmd) >> PAGE_SHIFT)
#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

/* Find an entry in the second-level page table.. */

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep)

/* Encode and de-code a swap entry */

#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			( (((x).val >> 6) & 0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | \
					    ((offset & 0x7) << 6) | \
					    ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
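/*
 * Worked layout, not in the original header: the swap type lands in
 * entry bits 0-4 (__swp_type() masks with 0x1f), offset bits 0-2 are
 * stored in entry bits 6-8, and offset bits 3 and up are stored from
 * entry bit 11 upward ((offset & ~0x7) << 8 puts offset bit 3, value 8,
 * at 0x800).  That leaves entry bits 5, 9 and 10 clear: bit 9 is
 * _PAGE_PRESENT (0x200), so a swap entry can never look like a present
 * translation, while bits 5 and 10 correspond to _PAGE_DIRTY (0x020)
 * and _PAGE_HUGE (0x400).  __swp_offset() simply reassembles the two
 * offset pieces.
 */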
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (!pte_young(*ptep))
		return 0;

	pte = *ptep;
	if (!pte_young(pte)) {
		return 0;
	}
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	set_pte_at(mm, addr, ptep, __pte(0));

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

#endif /* _PARISC_PGTABLE_H */