
nios2: Page table management

This patch adds support for page table management: pgd and pte allocation/free helpers, the hardware and software PTE protection bits, the pte/pmd accessor functions, and initialization of the kernel page tables.

Signed-off-by: Ley Foon Tan <lftan@altera.com>

+505
+86
arch/nios2/include/asm/pgalloc.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */

#ifndef _ASM_NIOS2_PGALLOC_H
#define _ASM_NIOS2_PGALLOC_H

#include <linux/mm.h>

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
	pte_t *pte)
{
	set_pmd(pmd, __pmd((unsigned long)pte));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
	pgtable_t pte)
{
	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

/*
 * Initialize a new pmd table with invalid pointers.
 */
extern void pmd_init(unsigned long page, unsigned long pagetable);

extern pgd_t *pgd_alloc(struct mm_struct *mm);

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
					 PTE_ORDER);

	return pte;
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
	if (pte) {
		if (!pgtable_page_ctor(pte)) {
			__free_page(pte);
			return NULL;
		}
		clear_highpage(pte);
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}

#define __pte_free_tlb(tlb, pte, addr)			\
	do {						\
		pgtable_page_dtor(pte);			\
		tlb_remove_page((tlb), (pte));		\
	} while (0)

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_NIOS2_PGALLOC_H */
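For orientation, here is a minimal standalone sketch (not from the patch) of the sizing arithmetic behind these allocators, assuming the usual nios2 configuration of 4 KiB pages and 4-byte pgd_t/pte_t entries; PGD_ORDER and PTE_ORDER are both 0 in pgtable.h below, so each table is a single page and pgd_free()/pte_free_kernel() hand back exactly one page.

#include <stdio.h>

/* Assumed values mirroring the patch: 4 KiB pages, 4-byte table entries,
 * PGD_ORDER == PTE_ORDER == 0 (one page per table). */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PGD_ORDER  0
#define PTE_ORDER  0
#define ENTRY_SIZE 4UL	/* sizeof(pgd_t) == sizeof(pte_t) on 32-bit nios2 */

int main(void)
{
	unsigned long ptrs_per_pgd = (PAGE_SIZE << PGD_ORDER) / ENTRY_SIZE;
	unsigned long ptrs_per_pte = (PAGE_SIZE << PTE_ORDER) / ENTRY_SIZE;

	/* One pgd page holds 1024 entries; one pte page holds 1024 PTEs. */
	printf("PTRS_PER_PGD = %lu\n", ptrs_per_pgd);
	printf("PTRS_PER_PTE = %lu\n", ptrs_per_pte);

	/* Each pgd entry therefore covers 1024 * 4 KiB = 4 MiB of address
	 * space, which matches PGDIR_SHIFT == 22 in pgtable.h. */
	printf("coverage per pgd entry = %lu MiB\n",
	       ptrs_per_pte * PAGE_SIZE / (1024 * 1024));
	return 0;
}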
+35
arch/nios2/include/asm/pgtable-bits.h
/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS2_PGTABLE_BITS_H
#define _ASM_NIOS2_PGTABLE_BITS_H

/*
 * These are actual hardware defined protection bits in the tlbacc register
 * which looks like this:
 *
 * 31 30 ... 26 25 24 23 22 21 20 19 18 ...  1  0
 * ignored.....  C  R  W  X  G PFN............
 */
#define _PAGE_GLOBAL	(1<<20)
#define _PAGE_EXEC	(1<<21)
#define _PAGE_WRITE	(1<<22)
#define _PAGE_READ	(1<<23)
#define _PAGE_CACHED	(1<<24)	/* C: data access cacheable */

/*
 * Software defined bits. They are ignored by the hardware and always read back
 * as zero, but can be written as non-zero.
 */
#define _PAGE_PRESENT	(1<<25)	/* PTE contains a translation */
#define _PAGE_ACCESSED	(1<<26)	/* page referenced */
#define _PAGE_DIRTY	(1<<27)	/* dirty page */
#define _PAGE_FILE	(1<<28)	/* PTE used for file mapping or swap */

#endif /* _ASM_NIOS2_PGTABLE_BITS_H */
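To make the tlbacc bit layout concrete, here is a small standalone sketch (plain C, with the bit positions copied from the header above; the PFN value is just an example) that packs a page frame number together with present/cached/read/write flags and unpacks it again:

#include <stdio.h>

/* Bit positions copied from pgtable-bits.h above. */
#define _PAGE_GLOBAL	(1UL << 20)
#define _PAGE_EXEC	(1UL << 21)
#define _PAGE_WRITE	(1UL << 22)
#define _PAGE_READ	(1UL << 23)
#define _PAGE_CACHED	(1UL << 24)
#define _PAGE_PRESENT	(1UL << 25)

#define PFN_MASK	0xfffffUL	/* PFN occupies bits 19..0 */

int main(void)
{
	unsigned long pfn = 0x01234;	/* example page frame number */

	/* Compose a present, cached, read/write mapping for that frame. */
	unsigned long pte = (pfn & PFN_MASK) |
			    _PAGE_PRESENT | _PAGE_CACHED |
			    _PAGE_READ | _PAGE_WRITE;

	/* Decompose it again: the low 20 bits are the PFN, the rest flags. */
	printf("pte      = 0x%08lx\n", pte);
	printf("pfn      = 0x%05lx\n", pte & PFN_MASK);
	printf("writable = %d\n", (pte & _PAGE_WRITE) != 0);
	printf("cached   = %d\n", (pte & _PAGE_CACHED) != 0);
	return 0;
}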
+310
arch/nios2/include/asm/pgtable.h
/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *
 * Based on asm/pgtable-32.h from mips which is:
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS2_PGTABLE_H
#define _ASM_NIOS2_PGTABLE_H

#include <linux/io.h>
#include <linux/bug.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define FIRST_USER_ADDRESS	0

#define VMALLOC_START		CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - 1)

struct mm_struct;

/* Helper macro */
#define MKP(x, w, r) __pgprot(_PAGE_PRESENT | _PAGE_CACHED |		\
				((x) ? _PAGE_EXEC : 0) |		\
				((r) ? _PAGE_READ : 0) |		\
				((w) ? _PAGE_WRITE : 0))
/*
 * These are the macros that generic kernel code needs
 * (to populate protection_map[])
 */

/* Remove W bit on private pages for COW support */
#define __P000	MKP(0, 0, 0)
#define __P001	MKP(0, 0, 1)
#define __P010	MKP(0, 0, 0)	/* COW */
#define __P011	MKP(0, 0, 1)	/* COW */
#define __P100	MKP(1, 0, 0)
#define __P101	MKP(1, 0, 1)
#define __P110	MKP(1, 0, 0)	/* COW */
#define __P111	MKP(1, 0, 1)	/* COW */

/* Shared pages can have exact HW mapping */
#define __S000	MKP(0, 0, 0)
#define __S001	MKP(0, 0, 1)
#define __S010	MKP(0, 1, 0)
#define __S011	MKP(0, 1, 1)
#define __S100	MKP(1, 0, 0)
#define __S101	MKP(1, 0, 1)
#define __S110	MKP(1, 1, 0)
#define __S111	MKP(1, 1, 1)

/* Used all over the kernel */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL)

#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_ACCESSED)

#define PAGE_COPY MKP(0, 0, 1)

#define PGD_ORDER	0
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	\
	(CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
{
	pmdptr->pud.pgd.pgd = pmdval.pud.pgd.pgd;
}

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

static inline int pte_write(pte_t pte)
	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)
	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)
	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)
	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return 0; }

#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_CACHED;

	return __pgprot(prot);
}

static inline int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf));
}

static inline int pte_present(pte_t pte)
	{ return pte_val(pte) & _PAGE_PRESENT; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC;

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != (unsigned long) invalid_pte_table)
			&& (pmd_val(pmd) != 0UL);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = (unsigned long) invalid_pte_table;
}

#define pte_pfn(pte)		(pte_val(pte) & 0xfffff)
#define pfn_pte(pfn, prot)	(__pte(pfn | pgprot_val(prot)))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * Store a linux PTE into the linux page table.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long paddr = page_to_virt(pte_page(pteval));

	flush_dcache_range(paddr, paddr + PAGE_SIZE);
	set_pte(ptep, pteval);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) ==
		(unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline void pte_clear(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	pte_t null;

	pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;

	set_pte_at(mm, addr, ptep, null);
	flush_tlb_one(addr);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	(pfn_pte(page_to_pfn(page), prot))

#define pte_unmap(pte)	do { } while (0)

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define pte_offset_map(dir, addr)			\
	((pte_t *) page_address(pmd_page(*dir)) +	\
	 (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Get the address to the PTE for a vaddr in specific directory */
#define pte_offset_kernel(dir, addr)			\
	((pte_t *) pmd_page_vaddr(*(dir)) +		\
	 (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Encode and decode a swap entry (must be !pte_none(pte) && !pte_present(pte)
 * && !pte_file(pte)):
 *
 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 ...  1  0
 *  0  0  0  0 type.  0  0  0  0  0  0 offset.........
 *
 * This gives us up to 2**2 = 4 swap files and 2**20 * 4K = 4G per swap file.
 *
 * Note that the offset field is always non-zero, thus !pte_none(pte) is always
 * true.
 */
#define __swp_type(swp)		(((swp).val >> 26) & 0x3)
#define __swp_offset(swp)	((swp).val & 0xfffff)
#define __swp_entry(type, off)	((swp_entry_t) { (((type) & 0x3) << 26) \
						 | ((off) & 0xfffff) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	25
#define pte_to_pgoff(pte)	(pte_val(pte) & 0x1ffffff)
#define pgoff_to_pte(off)	__pte(((off) & 0x1ffffff) | _PAGE_FILE)

#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

#define pgtable_cache_init()	do { } while (0)

extern void __init paging_init(void);
extern void __init mmu_init(void);

extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t *pte);

#endif /* _ASM_NIOS2_PGTABLE_H */
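The swap-entry comment above puts the swap type in bits 27-26 and the offset in bits 19-0. As a standalone mirror of the __swp_type/__swp_offset/__swp_entry macros (using plain unsigned longs instead of swp_entry_t, with an arbitrary example entry), the round trip and the resulting limits look like this:

#include <stdio.h>

/* Standalone mirror of the __swp_* macros: type in bits 27..26 (2 bits),
 * offset in bits 19..0 (20 bits). */
static unsigned long swp_entry(unsigned long type, unsigned long off)
{
	return ((type & 0x3) << 26) | (off & 0xfffff);
}

static unsigned long swp_type(unsigned long val)   { return (val >> 26) & 0x3; }
static unsigned long swp_offset(unsigned long val) { return val & 0xfffff; }

int main(void)
{
	unsigned long e = swp_entry(2, 0x3456);	/* example type/offset */

	printf("entry  = 0x%08lx\n", e);
	printf("type   = %lu\n", swp_type(e));		/* -> 2 */
	printf("offset = 0x%lx\n", swp_offset(e));	/* -> 0x3456 */

	/* 2 type bits -> 4 swap files; 20 offset bits of 4 KiB pages ->
	 * 4 GiB per swap file, matching the comment in the header. */
	printf("max files = %d, max size = %llu GiB\n",
	       1 << 2, (1ULL << 20) * 4096 / (1ULL << 30));
	return 0;
}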
+74
arch/nios2/mm/pgtable.c
/*
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/pgtable.h>
#include <asm/cpuinfo.h>

/* pteaddr:
 *   ptbase | vpn* | zero
 *   31-22  | 21-2 | 1-0
 *
 *   *vpn is preserved on double fault
 *
 * tlbacc:
 *   IG   |*flags| pfn
 *   31-25|24-20 | 19-0
 *
 *   *crwxg
 *
 * tlbmisc:
 *   resv  |way   |rd | we|pid |dbl|bad|perm|d
 *   31-24 |23-20 |19 | 18|17-4|3  |2  |1   |0
 *
 */

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
static void pgd_init(pgd_t *pgd)
{
	unsigned long *p = (unsigned long *) pgd;
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i += 8) {
		p[i + 0] = (unsigned long) invalid_pte_table;
		p[i + 1] = (unsigned long) invalid_pte_table;
		p[i + 2] = (unsigned long) invalid_pte_table;
		p[i + 3] = (unsigned long) invalid_pte_table;
		p[i + 4] = (unsigned long) invalid_pte_table;
		p[i + 5] = (unsigned long) invalid_pte_table;
		p[i + 6] = (unsigned long) invalid_pte_table;
		p[i + 7] = (unsigned long) invalid_pte_table;
	}
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init(ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}

void __init pagetable_init(void)
{
	/* Initialize the entire pgd. */
	pgd_init(swapper_pg_dir);
	pgd_init(swapper_pg_dir + USER_PTRS_PER_PGD);
}
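As a sanity check on the two-level layout this code manages (and on the pteaddr comment above: ptbase in bits 31-22, vpn in 21-2), the following standalone sketch splits a sample virtual address into its pgd index, pte index and page offset, assuming 4 KiB pages and the PGDIR_SHIFT of 22 from pgtable.h; the address value is just an example.

#include <stdio.h>

/* Assumed values from pgtable.h: 4 KiB pages, 1024-entry tables. */
#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024UL
#define PTRS_PER_PTE	1024UL

int main(void)
{
	unsigned long addr = 0x12345678;	/* example virtual address */

	unsigned long pgd_index = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
	unsigned long pte_index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	unsigned long offset    = addr & ((1UL << PAGE_SHIFT) - 1);

	/* The top 10 bits pick the pgd slot (4 MiB each), the next 10 bits
	 * pick the pte within that table, the low 12 bits are the byte
	 * offset within the page. */
	printf("pgd index = %lu\n", pgd_index);	/* -> 72 */
	printf("pte index = %lu\n", pte_index);	/* -> 837 */
	printf("offset    = 0x%lx\n", offset);	/* -> 0x678 */
	return 0;
}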