/*
 *  linux/include/asm-arm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <asm/arch/vmalloc.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_establish() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_establish() will ensure that the TLB is up to
 * date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map.
 * PGDIR_SHIFT determines the size of the area a first-level page table entry
 * can map; the two coincide here since the PMD level is folded away.
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21
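/*
 * A minimal worked sketch (illustrative only): with PGDIR_SHIFT == 21,
 * each pgd entry covers 1UL << 21 == 2MB, and 2048 entries span the full
 * 4GB address space.  Assuming the usual ARM PAGE_SHIFT of 12 (4kB pages),
 * the address 0x401ff123 decomposes as:
 *
 *	pgd index:   0x401ff123 >> 21          == 512
 *	pte index:   (0x401ff123 >> 12) & 511  == 511
 *	page offset: 0x401ff123 & 0xfff        == 0x123
 */
#if 0	/* hypothetical helpers, mirroring pgd_index()/__pte_index() below */
static inline unsigned long example_pgd_index(unsigned long addr)
{
	return addr >> PGDIR_SHIFT;			/* 0..2047 */
}

static inline unsigned long example_pte_index(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* 0..511 */
}
#endif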
#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		20
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_BUFFERABLE	(1 << 2)	/* matches PTE */
#define L_PTE_CACHEABLE		(1 << 3)	/* matches PTE */
#define L_PTE_USER		(1 << 4)
#define L_PTE_WRITE		(1 << 5)
#define L_PTE_EXEC		(1 << 6)
#define L_PTE_DIRTY		(1 << 7)
#define L_PTE_COHERENT		(1 << 9)	/* I/O coherent (xsc3) */
#define L_PTE_SHARED		(1 << 10)	/* shared between CPUs (v6) */
#define L_PTE_ASID		(1 << 11)	/* non-global (use ASID, v6) */

#ifndef __ASSEMBLY__

/*
 * The following macros handle the cache and bufferable bits...
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC

extern pgprot_t		pgprot_kernel;

#define PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL	pgprot_kernel

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
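/*
 * A worked example of the tables above (illustrative only): the generic mm
 * layer indexes its protection_map[] (built from __P000..__S111) with the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of a mapping.  A private
 * PROT_READ|PROT_WRITE mmap() therefore picks __P011, i.e. PAGE_COPY,
 * which only becomes writable after a copy-on-write fault, while the
 * shared equivalent picks __S011 == PAGE_SHARED, which carries L_PTE_WRITE.
 * Since execute protection is unavailable here (note 1 above), adding
 * PROT_EXEC changes nothing: __P101 == __P001 == PAGE_READONLY.
 */
#if 0	/* hypothetical sketch of how the mm core consults this table */
static pgprot_t example_prot_for(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}
#endif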
#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define set_pte(ptep, pte)	cpu_set_pte(ptep,pte)
#define set_pte_at(mm,addr,ptep,pteval)	set_pte(ptep,pteval)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte)		(pte_val(pte) & L_PTE_USER)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)

/*
 * The following only work if pte_present() is not true.
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 2)
#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	30
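/*
 * Worked example (illustrative only): a nonlinear file pte keeps the page
 * offset in bits 2..31, leaving bit 0 clear (!L_PTE_PRESENT) and bit 1 for
 * L_PTE_FILE - hence PTE_FILE_MAX_BITS of 32 - 2 == 30.  A page offset of
 * 0x1234 encodes via pgoff_to_pte() as (0x1234 << 2) | L_PTE_FILE == 0x48d2,
 * and decodes via pte_to_pgoff() as 0x48d2 >> 2 == 0x1234.
 */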
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread,    |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec,    |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))

/*
 * Permanent address of a page. We never have highmem, so this is trivial.
 */
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x)		(((x).val >> 2) & 0x7f)
#define __swp_offset(x)		((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
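/*
 * Worked example (illustrative only): bits 0 and 1 stay clear, so a swap
 * entry is neither present nor a file pte.  The swap type occupies bits
 * 2..8 (7 bits, room for 128 swap areas) and the offset bits 9..31.  With
 * 23 offset bits, 2^23 pages * 4kB per page gives the 32GB quoted above.
 */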
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */
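/*
 * Usage sketch (illustrative only): walking the tables with the accessors
 * above follows the usual Linux pattern; pmd_offset() collapses to a cast
 * because the pmd level is folded into the pgd:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);	(or pgd_offset(mm, addr))
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte = pte_offset_kernel(pmd, addr);
 *		if (pte_present(*pte))
 *			use pte_pfn(*pte), pte_dirty(*pte), etc.
 *	}
 */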