/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits.
 *   This is a must for 4k pg-sz, where the PFN leaves only bits 11:0 free
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise it
 *   has the Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become
 *   simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>

/**************************************************************************
 * Page Table Flags
 *
 * The ARC700 MMU only deals with software-managed TLB entries.
 * Page Tables are purely for the Linux VM's consumption and the bits below
 * are suited to that (uniqueness). Hence some are not implemented in the
 * TLB and some have a different value in the TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined form a translation entry),
 *      while from the PTE perspective they are 8 and 9 respectively
 * with MMU v3: most bits (except SHARED) sit at their exact hardware
 *      position (saves some bit shift ops in the TLB Miss handlers)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_FILE          (1<<7)	/* page cache/swap (S) */
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_FILE          (1<<6)	/* page cache/swap (S) */
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */
#endif
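/*
 * Worked example (MMU v3 layout above): a present, cached page with user
 * read + execute permission carries
 *	_PAGE_PRESENT | _PAGE_CACHEABLE | _PAGE_READ | _PAGE_EXECUTE
 *	= (1<<9) | (1<<0) | (1<<3) | (1<<1) = 0x20B
 * in its PTE. The (S)oftware-only bits (ACCESSED/MODIFIED/FILE) exist for
 * Linux bookkeeping alone and are masked out when the hardware TLB entry
 * is built (see the PTE_BITS_* masks further down).
 */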
/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
#else
#define _PAGE_DEF_CACHEABLE (0)
#endif

/* Helper for every "user" page
 * -kernel can R/W/X
 * -by default cached, unless config otherwise
 * -present in memory
 */
#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)

/* Set of bits not changed in pte_modify() */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)

/* More Abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						       _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While the kernel itself runs out of untranslated space, vmalloc/modules
 * use a chunk of user vaddr space - visible in all addr spaces, but kernel
 * mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *      which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism (a worked example follows the
 *     __P/__S table below)
 */
	/* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R	/* Pvt-W => !W */
#define __P011  PAGE_U_R	/* Pvt-W => !W */
#define __P100  PAGE_U_X_R	/* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R	/* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R	/* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R	/* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R	/* X => R */
#define __S111  PAGE_U_X_W_R
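/*
 * Example of rule #2 above: mmap(PROT_READ|PROT_WRITE, MAP_PRIVATE) maps
 * to __P011, i.e. PAGE_U_R - the PTE is installed without _PAGE_WRITE, so
 * the first store to the page faults, letting the fault handler copy the
 * page before granting write permission (pte_mkwrite() below).
 */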
/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 *
 *			32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  |
 *       |                  ---> index into Page Table
 *       |
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE	PAGE_SHIFT

/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE	8
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE	8
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE	9
#endif

#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)

#define PGDIR_SHIFT	(BITS_FOR_PTE + BITS_IN_PAGE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#ifdef __ASSEMBLY__
#define	PTRS_PER_PTE	(1 << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1 << BITS_FOR_PGD)
#else
#define	PTRS_PER_PTE	(1UL << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1UL << BITS_FOR_PGD)
#endif
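/*
 * Worked example with the default 8K page size:
 *	BITS_IN_PAGE = 13, BITS_FOR_PTE = 8, BITS_FOR_PGD = 11  (11:8:13)
 *	PGDIR_SHIFT = 21, so each PGD entry maps a 2 MB slice of vaddr space
 *	PTRS_PER_PTE = 256  => a page table is 1K  (256 x 4-byte entries)
 *	PTRS_PER_PGD = 2048 => the PGD itself is 8K (2048 x 4-byte entries)
 * which matches the 1K-memset / 8K-alloc notes in the header comment.
 */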
/*
 * Number of PGD entries a userland program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirement on the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS	0


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x) (mem_map + \
		(unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))

#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
	pte;								\
})

/* TBD: Non linear mapping stuff */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & _PAGE_FILE;
}

#define PTE_FILE_MAX_BITS	30
#define pgoff_to_pte(x)		__pte(x)
#define pte_to_pgoff(x)		(pte_val(x) >> 2)
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)		pte_offset(dir, addr)
#define pte_offset_map(dir, addr)		pte_offset(dir, addr)

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(0)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_MODIFIED));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_MODIFIED));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to the Page Directory of the "current" task
 * in an MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes a single register read
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with the mm_struct of a NON "current" task.
 * Thus use this macro only when you are certain that "current" is current,
 * e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif

extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * The 5-bit @type goes in bits 4:0 and @off in bits 31:13; bits 12:5 are
 * kept zero, ensuring that both _PAGE_FILE and _PAGE_PRESENT are zero in
 * a PTE holding a swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
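/*
 * e.g. __swp_entry(2, 0x1000) gives val 0x02000002: type 2 in bits 4:0,
 * offset 0x1000 in bits 31:13, and bits 12:5 (which cover _PAGE_FILE and
 * _PAGE_PRESENT in both MMU layouts) all zero, so the entry can never be
 * mistaken for a valid translation.
 */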
#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* __ASSEMBLY__ */

#endif