Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.25-rc2
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x008	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

#ifdef CONFIG_3_LEVEL_PGTABLES
#include "asm/pgtable-3level.h"
#else
#include "asm/pgtable-2level.h"
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do { } while (0)

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and treats that the
 * same as read.  Also, write permissions imply read permissions.  This
 * is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
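/*
 * How generic mm code consumes the tables above: the __Pxxx/__Sxxx
 * entries populate protection_map[], indexed by the VM_EXEC/VM_WRITE/
 * VM_READ bits of vm_flags (the three digits read as "xwr").  For
 * example, a private PROT_READ|PROT_WRITE mapping resolves to
 * __P011 == PAGE_COPY, which deliberately omits _PAGE_RW so that the
 * first write faults and is handled as copy-on-write, while the shared
 * equivalent __S011 == PAGE_SHARED keeps _PAGE_RW set.
 */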
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_FILE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}
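/*
 * A note on the two UML-specific software bits tested above (see also
 * the comment in set_pte() below): _PAGE_NEWPAGE marks a pte whose
 * mapping still has to be installed in, or removed from, the host
 * address space, and _PAGE_NEWPROT marks a present pte whose host-side
 * protection still has to be updated.  The tlb-flush path (fix_range()
 * and friends) walks the page tables looking for these bits, issues the
 * corresponding mmap/munmap/mprotect calls on the host, and then clears
 * the bits again via pte_mkuptodate().
 */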
/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if (pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if (pte_present(*pteptr))
		*pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
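/*
 * Typical usage sketch (hypothetical caller, for illustration only):
 * generic mm code builds a new pte value functionally with the pte_mk*()
 * helpers above, then installs it through set_pte_at(), so the
 * NEWPAGE/NEWPROT bookkeeping is applied once, at installation time:
 *
 *	pte_t entry = *ptep;
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	set_pte_at(mm, addr, ptep, entry);
 */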
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte)); \
	pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma,address,pte) do { } while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 4) & 0x3f)
#define __swp_offset(x)			((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

#endif
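/*
 * Worked example of the swap-entry encoding above, for illustration:
 * bits 0-3 of a non-present pte are left to the software flags
 * (_PAGE_PRESENT, _PAGE_NEWPAGE, _PAGE_NEWPROT, _PAGE_FILE), the swap
 * type occupies the six bits starting at bit 4, and the swap offset
 * starts at bit 11.  So __swp_entry(1, 2) builds the value
 * (1 << 4) | (2 << 11) == 0x1010, from which __swp_type() recovers
 * type 1 and __swp_offset() recovers offset 2.
 */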