/*
 * linux/arch/unicore32/include/asm/pgtable.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_PGTABLE_H__
#define __UNICORE_PGTABLE_H__

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/cpu-single.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET	SZ_8M
#define VMALLOC_START	(((unsigned long)high_memory + VMALLOC_OFFSET) \
				& ~(VMALLOC_OFFSET-1))
#define VMALLOC_END	(0xff000000UL)
#endif
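
/*
 * Worked example (illustrative numbers only, not a statement about any
 * particular board): if high_memory ends up at 0xc8000000, then
 *
 *	VMALLOC_START = (0xc8000000 + SZ_8M) & ~(SZ_8M - 1) = 0xc8800000
 *
 * i.e. the vmalloc area starts one aligned 8MB step above the end of
 * lowmem, leaving the guard hole described above, and extends up to
 * (but not including) VMALLOC_END.
 */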

#define PTRS_PER_PTE	1024
#define PTRS_PER_PGD	1024

/*
 * PGDIR_SHIFT determines the size a first-level page table entry can map.
 */
#define PGDIR_SHIFT	22

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)	__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pgd_ERROR(pgd)	__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT	22
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define SECTION_MASK	(~(SECTION_SIZE-1))

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable bits based on memory policy, as well as any
 * architecture dependent bits.
 */
#define _PTE_DEFAULT	(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE)

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define PAGE_NONE		pgprot_user
#define PAGE_SHARED		__pgprot(pgprot_val(pgprot_user | PTE_READ \
							| PTE_WRITE))
#define PAGE_SHARED_EXEC	__pgprot(pgprot_val(pgprot_user | PTE_READ \
							| PTE_WRITE \
							| PTE_EXEC))
#define PAGE_COPY		__pgprot(pgprot_val(pgprot_user | PTE_READ))
#define PAGE_COPY_EXEC		__pgprot(pgprot_val(pgprot_user | PTE_READ \
							| PTE_EXEC))
#define PAGE_READONLY		__pgprot(pgprot_val(pgprot_user | PTE_READ))
#define PAGE_READONLY_EXEC	__pgprot(pgprot_val(pgprot_user | PTE_READ \
							| PTE_EXEC))
#define PAGE_KERNEL		pgprot_kernel
#define PAGE_KERNEL_EXEC	__pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))

#define __PAGE_NONE		__pgprot(_PTE_DEFAULT)
#define __PAGE_SHARED		__pgprot(_PTE_DEFAULT | PTE_READ \
						| PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
						| PTE_WRITE \
						| PTE_EXEC)
#define __PAGE_COPY		__pgprot(_PTE_DEFAULT | PTE_READ)
#define __PAGE_COPY_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
						| PTE_EXEC)
#define __PAGE_READONLY		__pgprot(_PTE_DEFAULT | PTE_READ)
#define __PAGE_READONLY_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
						| PTE_EXEC)

#endif /* !__ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on UniCore hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 *
 * For example, a private writable mapping uses __P011 == __PAGE_COPY: the
 * pte is installed read-only, and the first write fault does copy-on-write
 * before the new page is mapped writable.
 */
#define __P000	__PAGE_NONE
#define __P001	__PAGE_READONLY
#define __P010	__PAGE_COPY
#define __P011	__PAGE_COPY
#define __P100	__PAGE_READONLY_EXEC
#define __P101	__PAGE_READONLY_EXEC
#define __P110	__PAGE_COPY_EXEC
#define __P111	__PAGE_COPY_EXEC

#define __S000	__PAGE_NONE
#define __S001	__PAGE_READONLY
#define __S010	__PAGE_SHARED
#define __S011	__PAGE_SHARED
#define __S100	__PAGE_READONLY_EXEC
#define __S101	__PAGE_READONLY_EXEC
#define __S110	__PAGE_SHARED_EXEC
#define __S111	__PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) \
					| pgprot_val(prot)))

#define pte_none(pte)			(!pte_val(pte))
#define pte_clear(mm, addr, ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)			(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))

#define pte_offset_map(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))
#define pte_unmap(pte)			do { } while (0)

#define set_pte(ptep, pte)	cpu_set_pte(ptep, pte)

#define set_pte_at(mm, addr, ptep, pteval)	\
	do {					\
		set_pte(ptep, pteval);		\
	} while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_YOUNG)
#define pte_exec(pte)		(pte_val(pte) & PTE_EXEC)
#define pte_special(pte)	(0)

#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE);
PTE_BIT_FUNC(mkwrite, |= PTE_WRITE);
PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= PTE_YOUNG);
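
/*
 * For illustration, PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE) above expands
 * to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) &= ~PTE_WRITE;
 *		return pte;
 *	}
 *
 * Each helper modifies and returns a copy of the pte value; nothing is
 * written to the page table until the caller stores the result, e.g.
 * with set_pte_at().
 */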

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*
 * Mark the prot value as uncacheable.
 */
#define pgprot_noncached(prot)		\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
#define pgprot_writecombine(prot)	\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
#define pgprot_dmacoherent(prot)	\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd) & PMD_PRESENT)
#define pmd_bad(pmd)		(((pmd_val(pmd) &		\
				(PMD_PRESENT | PMD_TYPE_MASK))	\
				!= (PMD_PRESENT | PMD_TYPE_TABLE)))

#define set_pmd(pmdpd, pmdval)		\
	do {				\
		*(pmdpd) = pmdval;	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		set_pmd(pmdp, __pmd(0));\
		clean_pmd_entry(pmdp);	\
	} while (0)

#define pmd_page_vaddr(pmd)	((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
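
/*
 * A sketch of how pte_modify() is used (illustrative call; the real
 * callers live in the generic mm code, e.g. mprotect's
 * change_protection()):
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * Only the PTE_READ/PTE_WRITE/PTE_EXEC permission bits are taken from
 * the new protection; PTE_PRESENT, PTE_YOUNG, PTE_DIRTY and the cache
 * attribute of the original pte are left untouched.
 */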

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------> <--- type --> 0 0 0 0 0
 *
 * This gives us up to 127 swap files and, with 20 bits of offset and
 * 4kB pages, up to 4GB per swap file.  Note that the offset field is
 * always non-zero.
 */
#define __SWP_TYPE_SHIFT	5
#define __SWP_TYPE_BITS		7
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT)	\
				& __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) {		\
				((type) << __SWP_TYPE_SHIFT) |	\
				((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

#define pgtable_cache_init()	do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __UNICORE_PGTABLE_H__ */