/*
 *  linux/include/asm-arm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/memory.h>
#include <asm/proc-fns.h>
#include <asm/arch/vmalloc.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * if the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_establish() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_establish() will ensure that the TLB is up to
 * date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
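#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): the dirty-bit emulation above in plain C.  The helper name is
 * hypothetical; the raw bit values stand in for L_PTE_WRITE (bit 5),
 * L_PTE_DIRTY (bit 7) and the PTE_SMALL_AP_* encodings defined further
 * down in this file.  The young/user handling is omitted for brevity.
 */
static inline unsigned long __example_hw_ap_bits(unsigned long linux_pte)
{
	/* hardware write permission only for writable AND dirty pages */
	if ((linux_pte & (1 << 5)) && (linux_pte & (1 << 7)))
		return 0xff << 4;	/* PTE_SMALL_AP_URW_SRW */
	return 0xaa << 4;		/* PTE_SMALL_AP_URO_SRW: a write faults,
					   letting handle_pte_fault() set dirty */
}
#endif /* !__ASSEMBLY__ */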
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))
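#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (editor's addition): with PMD_SHIFT == PGDIR_SHIFT
 * == 21, each pgd entry spans 2MB - two 1MB hardware sections - so an
 * address such as 0x40100000 lands in pgd slot 0x200.  The helper name
 * is hypothetical; the real lookup is pgd_index() further down.
 */
static inline unsigned long __example_pgd_slot(unsigned long addr)
{
	return addr >> PGDIR_SHIFT;	/* 0x40100000 >> 21 == 0x200 */
}
#endif /* !__ASSEMBLY__ */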
/*
 * Hardware page table definitions.
 *
 * + Level 1 descriptor (PMD)
 *   - common
 */
#define PMD_TYPE_MASK		(3 << 0)
#define PMD_TYPE_FAULT		(0 << 0)
#define PMD_TYPE_TABLE		(1 << 0)
#define PMD_TYPE_SECT		(2 << 0)
#define PMD_BIT4		(1 << 4)
#define PMD_DOMAIN(x)		((x) << 5)
#define PMD_PROTECTION		(1 << 9)	/* v5 */
/*
 *   - section
 */
#define PMD_SECT_BUFFERABLE	(1 << 2)
#define PMD_SECT_CACHEABLE	(1 << 3)
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_AP_READ	(1 << 11)
#define PMD_SECT_TEX(x)		((x) << 12)	/* v5 */
#define PMD_SECT_APX		(1 << 15)	/* v6 */
#define PMD_SECT_S		(1 << 16)	/* v6 */
#define PMD_SECT_nG		(1 << 17)	/* v6 */
#define PMD_SECT_SUPER		(1 << 18)	/* v6 */

#define PMD_SECT_UNCACHED	(0)
#define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT		(PMD_SECT_CACHEABLE)
#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)

/*
 *   - coarse table (not used)
 */

/*
 * + Level 2 descriptor (PTE)
 *   - common
 */
#define PTE_TYPE_MASK		(3 << 0)
#define PTE_TYPE_FAULT		(0 << 0)
#define PTE_TYPE_LARGE		(1 << 0)
#define PTE_TYPE_SMALL		(2 << 0)
#define PTE_TYPE_EXT		(3 << 0)	/* v5 */
#define PTE_BUFFERABLE		(1 << 2)
#define PTE_CACHEABLE		(1 << 3)

/*
 *   - extended small page/tiny page
 */
#define PTE_EXT_AP_MASK		(3 << 4)
#define PTE_EXT_AP_UNO_SRO	(0 << 4)
#define PTE_EXT_AP_UNO_SRW	(1 << 4)
#define PTE_EXT_AP_URO_SRW	(2 << 4)
#define PTE_EXT_AP_URW_SRW	(3 << 4)
#define PTE_EXT_TEX(x)		((x) << 6)	/* v5 */

/*
 *   - small page
 */
#define PTE_SMALL_AP_MASK	(0xff << 4)
#define PTE_SMALL_AP_UNO_SRO	(0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW	(0x55 << 4)
#define PTE_SMALL_AP_URO_SRW	(0xaa << 4)
#define PTE_SMALL_AP_URW_SRW	(0xff << 4)

/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_BUFFERABLE	(1 << 2)	/* matches PTE */
#define L_PTE_CACHEABLE		(1 << 3)	/* matches PTE */
#define L_PTE_USER		(1 << 4)
#define L_PTE_WRITE		(1 << 5)
#define L_PTE_EXEC		(1 << 6)
#define L_PTE_DIRTY		(1 << 7)
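/*
 * Illustrative example (editor's addition): a present, young, fully
 * cached, user-writable page combines the L_PTE_* bits above as shown
 * below.  The macro is hypothetical and unused; per the dirty-bit
 * emulation note at the top of this file, a small page with these Linux
 * bits only receives the PTE_SMALL_AP_URW_SRW hardware encoding once it
 * is also marked L_PTE_DIRTY.
 */
#define __EXAMPLE_L_PTE_URW	(L_PTE_PRESENT | L_PTE_YOUNG | \
				 L_PTE_CACHEABLE | L_PTE_BUFFERABLE | \
				 L_PTE_USER | L_PTE_WRITE)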
#ifndef __ASSEMBLY__

#include <asm/domain.h>

#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))

/*
 * The following macros handle the cache and bufferable bits...
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC

extern pgprot_t		pgprot_kernel;

#define PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL	pgprot_kernel

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(ptep, pte)	cpu_set_pte(ptep,pte)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte)		(pte_val(pte) & L_PTE_USER)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
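/*
 * Illustrative sketch (editor's addition): how the tests above identify
 * the dirty-bit emulation case described at the top of this file.  The
 * helper is hypothetical, not a kernel API.
 */
static inline int __example_is_emulated_dirty_fault(pte_t pte)
{
	/* present and writable but still clean: the permission fault on
	   the next write lets handle_pte_fault() set L_PTE_DIRTY */
	return pte_present(pte) && pte_write(pte) && !pte_dirty(pte);
}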
/*
 * The following only work if pte_present() is not true.
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 2)
#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	30

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec, |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))

/*
 * Permanent address of a page. We never have highmem, so this is trivial.
 */
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

#define page_pte_prot(page,prot)	mk_pte(page, prot)
#define page_pte(page)		mk_pte(page, __pgprot(0))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
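/*
 * Illustrative sketch (editor's addition): a minimal walk through the
 * lookup macros above.  The function name is hypothetical, and it
 * assumes the includer already has a complete struct mm_struct (e.g.
 * via <linux/sched.h>); real callers also hold the page table locks.
 */
static inline pte_t __example_lookup_pte(struct mm_struct *mm,
					 unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd = pmd_offset(pgd, addr);
	pte_t *ptep, pte;

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return __pte(0);	/* nothing mapped at this address */

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	pte_unmap(ptep);

	return pte;
}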
/*
 * Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x)		(((x).val >> 2) & 0x7f)
#define __swp_offset(x)		((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical address `phys' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_page_range(vma,from,phys,size,prot) \
		remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot)

#define io_remap_pfn_range(vma,from,pfn,size,prot) \
		remap_pfn_range(vma, from, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASMARM_PGTABLE_H */
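/*
 * Worked example (editor's addition): with the swap encoding above,
 * __swp_entry(3, 0x1234) packs to (3 << 2) | (0x1234 << 9) == 0x24680c.
 * The type field occupies bits 2..8 and the offset bits 9..31 of the
 * 32-bit entry; 2^23 offsets of 4kB each is exactly the 32GB limit
 * quoted in the swap comment.
 */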