/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_PGTABLE_H
#define __ASM_AVR32_PGTABLE_H

#include <asm/addrspace.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>

#endif /* !__ASSEMBLY__ */

/*
 * Use two-level page tables, just like i386 (without PAE).
 */
#include <asm/pgtable-2level.h>

/*
 * The following code might need some cleanup when the values are
 * final...
 */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define PTE_PHYS_MASK	0x1ffff000

#ifndef __ASSEMBLY__
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero: used for
 * zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

/*
 * Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8 MiB value just means that there will be an 8 MiB "hole"
 * after the uncached physical memory (P2 segment) until the vmalloc
 * area starts. That means that any out-of-bounds memory accesses will
 * hopefully be caught; we don't know whether the ends of the P1/P2
 * segments are actually used for anything, but it is safer anyway to
 * let the MMU catch these kinds of errors than to rely on the memory
 * bus.
 *
 * A "hole" of the same size is added to the end of the P3 segment as
 * well. It might seem wasteful to use 16 MiB of virtual address space
 * on this, but we do have 512 MiB of it...
 *
 * The vmalloc() routines leave a hole of 4 KiB between each vmalloced
 * area for the same reason.
 */
#define VMALLOC_OFFSET	(8 * 1024 * 1024)
#define VMALLOC_START	(P3SEG + VMALLOC_OFFSET)
#define VMALLOC_END	(P4SEG - VMALLOC_OFFSET)
#endif /* !__ASSEMBLY__ */
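/*
 * Illustrative sketch, not part of the original header: assuming the
 * AVR32 segment bases from <asm/addrspace.h> (P3SEG == 0xc0000000,
 * P4SEG == 0xe0000000), the definitions above work out to
 *
 *	VMALLOC_START == 0xc0800000
 *	VMALLOC_END   == 0xdf800000
 *
 * i.e. roughly 496 MiB of vmalloc space, with an 8 MiB guard hole at
 * each end of the 512 MiB P3 segment.
 */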
/*
 * Page flags. Some of these flags are not directly supported by
 * hardware, so we have to emulate them.
 */
#define _TLBEHI_BIT_VALID	9
#define _TLBEHI_VALID		(1 << _TLBEHI_BIT_VALID)

#define _PAGE_BIT_WT		0 /* W-bit  : write-through */
#define _PAGE_BIT_DIRTY		1 /* D-bit  : page changed */
#define _PAGE_BIT_SZ0		2 /* SZ0-bit: size of page */
#define _PAGE_BIT_SZ1		3 /* SZ1-bit: size of page */
#define _PAGE_BIT_EXECUTE	4 /* X-bit  : execute access allowed */
#define _PAGE_BIT_RW		5 /* AP0-bit: write access allowed */
#define _PAGE_BIT_USER		6 /* AP1-bit: user space access allowed */
#define _PAGE_BIT_BUFFER	7 /* B-bit  : bufferable */
#define _PAGE_BIT_GLOBAL	8 /* G-bit  : global (ignore ASID) */
#define _PAGE_BIT_CACHABLE	9 /* C-bit  : cachable */

/* If we drop support for 1K pages, we get two extra bits */
#define _PAGE_BIT_PRESENT	10
#define _PAGE_BIT_ACCESSED	11 /* software: page was accessed */

/* The following flag is only valid when !PRESENT */
#define _PAGE_BIT_FILE		0 /* software: pagecache or swap? */

#define _PAGE_WT	(1 << _PAGE_BIT_WT)
#define _PAGE_DIRTY	(1 << _PAGE_BIT_DIRTY)
#define _PAGE_EXECUTE	(1 << _PAGE_BIT_EXECUTE)
#define _PAGE_RW	(1 << _PAGE_BIT_RW)
#define _PAGE_USER	(1 << _PAGE_BIT_USER)
#define _PAGE_BUFFER	(1 << _PAGE_BIT_BUFFER)
#define _PAGE_GLOBAL	(1 << _PAGE_BIT_GLOBAL)
#define _PAGE_CACHABLE	(1 << _PAGE_BIT_CACHABLE)

/* Software flags */
#define _PAGE_ACCESSED	(1 << _PAGE_BIT_ACCESSED)
#define _PAGE_PRESENT	(1 << _PAGE_BIT_PRESENT)
#define _PAGE_FILE	(1 << _PAGE_BIT_FILE)

/*
 * Page types, i.e. sizes. _PAGE_TYPE_NONE corresponds to what is
 * usually called _PAGE_PROTNONE on other architectures.
 *
 * XXX: Find out if _PAGE_PROTNONE is equivalent to !_PAGE_USER. If
 * so, we can encode all possible page sizes (although we can't really
 * support 1K pages anyway due to the _PAGE_PRESENT and _PAGE_ACCESSED
 * bits).
 */
#define _PAGE_TYPE_MASK		((1 << _PAGE_BIT_SZ0) | (1 << _PAGE_BIT_SZ1))
#define _PAGE_TYPE_NONE		(0 << _PAGE_BIT_SZ0)
#define _PAGE_TYPE_SMALL	(1 << _PAGE_BIT_SZ0)
#define _PAGE_TYPE_MEDIUM	(2 << _PAGE_BIT_SZ0)
#define _PAGE_TYPE_LARGE	(3 << _PAGE_BIT_SZ0)

/*
 * Mask which drops the software flags. We currently can't handle more
 * than 512 MiB of physical memory, so we can use bits 29-31 for other
 * stuff. With a fixed 4K page size, we can use bits 10-11 as well as
 * bits 2-3 (SZ).
 */
#define _PAGE_FLAGS_HARDWARE_MASK	0xfffff3ff

#define _PAGE_FLAGS_CACHE_MASK	(_PAGE_CACHABLE | _PAGE_BUFFER | _PAGE_WT)

/* TODO: Check for sanity */
/* User-mode page table flags (to be set in a pgd or pmd entry) */
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_TYPE_SMALL | _PAGE_RW	\
			 | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Kernel-mode page table flags */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_TYPE_SMALL | _PAGE_RW	\
			 | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Flags that may be modified by software */
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY	\
			 | _PAGE_FLAGS_CACHE_MASK)

#define _PAGE_FLAGS_READ	(_PAGE_CACHABLE | _PAGE_BUFFER)
#define _PAGE_FLAGS_WRITE	(_PAGE_FLAGS_READ | _PAGE_RW | _PAGE_DIRTY)

#define _PAGE_NORMAL(x)	__pgprot((x) | _PAGE_PRESENT | _PAGE_TYPE_SMALL	\
				 | _PAGE_ACCESSED)

#define PAGE_NONE	(_PAGE_ACCESSED | _PAGE_TYPE_NONE)
#define PAGE_READ	(_PAGE_FLAGS_READ | _PAGE_USER)
#define PAGE_EXEC	(_PAGE_FLAGS_READ | _PAGE_EXECUTE | _PAGE_USER)
#define PAGE_WRITE	(_PAGE_FLAGS_WRITE | _PAGE_USER)
#define PAGE_KERNEL	_PAGE_NORMAL(_PAGE_FLAGS_WRITE | _PAGE_EXECUTE | _PAGE_GLOBAL)
#define PAGE_KERNEL_RO	_PAGE_NORMAL(_PAGE_FLAGS_READ | _PAGE_EXECUTE | _PAGE_GLOBAL)

#define _PAGE_P(x)	_PAGE_NORMAL((x) & ~(_PAGE_RW | _PAGE_DIRTY))
#define _PAGE_S(x)	_PAGE_NORMAL(x)

#define PAGE_COPY	_PAGE_P(PAGE_WRITE | PAGE_READ)

#ifndef __ASSEMBLY__
/*
 * The hardware supports flags for write and execute access. Read is
 * always allowed if the page is loaded into the TLB, so the "-w-",
 * "--x" and "-wx" mappings are implemented as "rw-", "r-x" and "rwx",
 * respectively.
 *
 * The "---" case is handled by software; the page will simply not be
 * loaded into the TLB if the page type is _PAGE_TYPE_NONE.
 */
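/*
 * Worked example, following from the definitions above: for a private
 * writable mapping ("-w-"), __P010 below expands to
 *
 *	_PAGE_P(PAGE_WRITE)
 *	  == _PAGE_NORMAL(PAGE_WRITE & ~(_PAGE_RW | _PAGE_DIRTY))
 *	  == __pgprot(_PAGE_CACHABLE | _PAGE_BUFFER | _PAGE_USER
 *		      | _PAGE_PRESENT | _PAGE_TYPE_SMALL | _PAGE_ACCESSED)
 *
 * i.e. _PAGE_P() strips the RW and DIRTY bits, so the page is entered
 * read-only in the TLB and the first write faults, which is what lets
 * the generic mm code do copy-on-write for private mappings.
 */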
#define __P000	__pgprot(PAGE_NONE)
#define __P001	_PAGE_P(PAGE_READ)
#define __P010	_PAGE_P(PAGE_WRITE)
#define __P011	_PAGE_P(PAGE_WRITE | PAGE_READ)
#define __P100	_PAGE_P(PAGE_EXEC)
#define __P101	_PAGE_P(PAGE_EXEC | PAGE_READ)
#define __P110	_PAGE_P(PAGE_EXEC | PAGE_WRITE)
#define __P111	_PAGE_P(PAGE_EXEC | PAGE_WRITE | PAGE_READ)

#define __S000	__pgprot(PAGE_NONE)
#define __S001	_PAGE_S(PAGE_READ)
#define __S010	_PAGE_S(PAGE_WRITE)
#define __S011	_PAGE_S(PAGE_WRITE | PAGE_READ)
#define __S100	_PAGE_S(PAGE_EXEC)
#define __S101	_PAGE_S(PAGE_EXEC | PAGE_READ)
#define __S110	_PAGE_S(PAGE_EXEC | PAGE_WRITE)
#define __S111	_PAGE_S(PAGE_EXEC | PAGE_WRITE | PAGE_READ)

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)

#define pte_clear(mm, addr, xp)					\
	do {							\
		set_pte_at(mm, addr, xp, __pte(0));		\
	} while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not...
 */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXECUTE;
}
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}

/* Mutator functions for PTE bits */
static inline pte_t pte_rdprotect(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER));
	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW));
	return pte;
}
static inline pte_t pte_exprotect(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE));
	return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY));
	return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED));
	return pte;
}
static inline pte_t pte_mkread(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER));
	return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW));
	return pte;
}
static inline pte_t pte_mkexec(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE));
	return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY));
	return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
	set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED));
	return pte;
}
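/*
 * Example usage (illustrative only, not part of this header): this is
 * roughly how a fault handler combines the accessors and mutators
 * above to mark a present, writable PTE dirty and young after a write
 * access:
 *
 *	pte_t entry = *ptep;
 *	if (pte_present(entry) && pte_write(entry)) {
 *		entry = pte_mkyoung(pte_mkdirty(entry));
 *		set_pte_at(mm, address, ptep, entry);
 *	}
 */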
#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER))	\
			 != _KERNPG_TABLE)

/*
 * Permanent address of a page. We don't support highmem, so this is
 * trivial.
 */
#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))
#define pte_page(x)	phys_to_page(pte_val(x) & PTE_PHYS_MASK)

/*
 * Mark the prot value as uncacheable and unbufferable
 */
#define pgprot_noncached(prot)						\
	__pgprot(pgprot_val(prot) & ~(_PAGE_BUFFER | _PAGE_CACHABLE))

/*
 * Mark the prot value as uncacheable but bufferable
 */
#define pgprot_writecombine(prot)					\
	__pgprot((pgprot_val(prot) & ~_PAGE_CACHABLE) | _PAGE_BUFFER)

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK)
			    | pgprot_val(newprot)));
	return pte;
}

#define page_pte(page)	page_pte_prot(page, __pgprot(0))

#define pmd_page_vaddr(pmd)					\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#define pmd_page(pmd)	(phys_to_page(pmd_val(pmd)))

/* To find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
#define pgd_offset_current(address)				\
	((pgd_t *)__mfsr(SYSREG_PTBR) + pgd_index(address))

/* To find an entry in a kernel page-table-directory. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* Find an entry in the third-level page table... */
#define pte_index(address)					\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address)	pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t pte);

/*
 * Encode and decode a swap entry
 *
 * Constraints:
 *   _PAGE_FILE at bit 0
 *   _PAGE_TYPE_* at bits 2-3 (for emulating _PAGE_PROTNONE)
 *   _PAGE_PRESENT at bit 10
 *
 * We encode the type into bits 4-9 and the offset into bits 11-31.
 * This gives us a 21-bit offset, or 2**21 * 4K = 8G of usable swap
 * space per device, and 64 possible types.
 *
 * NOTE: We should set ZEROs at the positions of the _PAGE_PRESENT and
 *	 _PAGE_PROTNONE bits.
 */
#define __swp_type(x)		(((x).val >> 4) & 0x3f)
#define __swp_offset(x)		((x).val >> 11)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

/*
 * Encode and decode a nonlinear file mapping entry. We have to
 * preserve _PAGE_FILE and _PAGE_PRESENT here. _PAGE_TYPE_* isn't
 * necessary, since _PAGE_FILE implies !_PAGE_PROTNONE (?)
 */
#define PTE_FILE_MAX_BITS	30
#define pte_to_pgoff(pte)	(((pte_val(pte) >> 1) & 0x1ff)	\
				 | ((pte_val(pte) >> 11) << 9))
#define pgoff_to_pte(off)	((pte_t) { ((((off) & 0x1ff) << 1)	\
					    | (((off) >> 9) << 11)	\
					    | _PAGE_FILE) })
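/*
 * Worked example of the two encodings above (illustrative only):
 *
 *	__swp_entry(5, 100)  == (5 << 4) | (100 << 11)        == 0x32050
 *	__swp_type(entry)    == (0x32050 >> 4) & 0x3f         == 5
 *	__swp_offset(entry)  == 0x32050 >> 11                 == 100
 *
 *	pgoff_to_pte(0x1234) == (0x34 << 1) | (0x9 << 11) | 1 == 0x4869
 *	pte_to_pgoff(pte)    == 0x34 | (0x9 << 9)             == 0x1234
 *
 * Both encodings keep bit 10 (_PAGE_PRESENT) clear; the swap encoding
 * also keeps bit 0 (_PAGE_FILE) clear, while the file encoding sets it.
 */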
typedef pte_t *pte_addr_t;

#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

/* No page table caches to initialize (?) */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_AVR32_PGTABLE_H */