#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/config.h>
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START	0xfffffe0000000000
#else
#define VMALLOC_START	(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END	(-PGDIR_SIZE)

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000
#define _PAGE_FILE	0x80000	/* set:pagecache, unset:swap */

/*
 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it.  That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */
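/*
 * Illustrative sketch, not part of the original header: with the
 * standard 8 KB page size (PAGE_SHIFT == 13), each one-page table
 * holds 8192/8 = 1024 entries, so PMD_SHIFT == 23, PGDIR_SHIFT == 33,
 * and the tree spans 2^43 bytes of virtual space.  The hypothetical
 * helper below decomposes a virtual address into its three level
 * indices using the macros defined above.
 */
static inline void __pgtable_indices_example(unsigned long address,
					     unsigned long *pgd_i,
					     unsigned long *pmd_i,
					     unsigned long *pte_i)
{
	*pgd_i = (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);	/* bits 33..42 */
	*pmd_i = (address >> PMD_SHIFT)   & (PTRS_PER_PMD - 1);	/* bits 23..32 */
	*pte_i = (address >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1);	/* bits 13..22 */
}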
#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
	/* xwr */
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)

/*
 * pgprot_noncached() is only for infiniband pci support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2	3

/* to find an entry in a page-table */
#define PAGE_PTR(address)	\
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
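/*
 * Illustrative sketch, not part of the original header: build the
 * private-mapping protection for an (x,w,r) permission triple the way
 * the __P table above does.  Note that _PAGE_P() unconditionally sets
 * _PAGE_FOW, so even a writable private page starts out write-protected
 * and is only made writable on the copy-on-write fault.  The helper
 * name is hypothetical.
 */
static inline pgprot_t __prot_private_example(int r, int w, int x)
{
	unsigned long fault_bits = 0;

	if (w)
		r = 1;	/* write implies read: "-w-" is promoted to "rw-" */
	if (!r)
		fault_bits |= _PAGE_FOR;
	if (!w)
		fault_bits |= _PAGE_FOW;
	if (!x)
		fault_bits |= _PAGE_FOE;
	return _PAGE_P(fault_bits);	/* e.g. (r=1,w=0,x=0) matches __P001 */
}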
/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define page_to_pa(page)	(((page) - mem_map) << PAGE_SHIFT)

#define pte_pfn(pte)	(pte_val(pte) >> 32)
#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
									\
	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
	pte;								\
})
#endif

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }


extern inline unsigned long
pmd_page_kernel(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
#endif

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }
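/*
 * Illustrative sketch, not part of the original header: since the pfn
 * lives in the upper 32 bits of a pte, turning a kernel KSEG virtual
 * address into a pte is just a shift through PAGE_OFFSET followed by
 * pfn_pte() (which applies PHYS_TWIDDLE above).  The helper name is
 * hypothetical.
 */
static inline pte_t __mk_kernel_pte_example(unsigned long vaddr, pgprot_t prot)
{
	unsigned long pfn = (vaddr - PAGE_OFFSET) >> PAGE_SHIFT;

	return pfn_pte(pfn, prot);	/* pfn ends up in bits 32..63 */
}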
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page_kernel(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}
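/*
 * Illustrative sketch, not part of the original header: a software
 * walk of all three levels using the helpers above, returning the pte
 * that maps `address` under `pgd_base`, or NULL if an intermediate
 * level is absent.  Real kernel walkers add locking; the helper name
 * is hypothetical.
 */
static inline pte_t * __pte_lookup_example(pgd_t *pgd_base, unsigned long address)
{
	pgd_t *pgd = pgd_base + pgd_index(address);
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}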
/*
 * Non-present pages: high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define pte_to_pgoff(pte)	(pte_val(pte) >> 32)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 32) | _PAGE_FILE })

#define PTE_FILE_MAX_BITS	32

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

#define io_remap_pfn_range(vma, start, pfn, size, prot)	\
		remap_pfn_range(vma, start, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

#include <asm-generic/pgtable.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */
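/*
 * Illustrative example, not part of the original header: the swap
 * encoding above keeps the low 32 bits (including _PAGE_VALID) clear,
 * with the type in bits 32..39 and the offset in bits 40..63, so a
 * round trip looks like:
 *
 *	swp_entry_t e = __swp_entry(5, 0x1234);
 *	__swp_type(e)   == 5;		-- (e.val >> 32) & 0xff
 *	__swp_offset(e) == 0x1234;	-- e.val >> 40
 *	pte_present(__swp_entry_to_pte(e)) == 0;  -- never looks "valid"
 */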