arch/arm/include/asm/pgtable.h, from the Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git) at v5.2-rc2
/*
 * arch/arm/include/asm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif
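/*
 * Illustrative sketch (not part of the original header): the
 * VMALLOC_START expression above rounds high_memory up to the next
 * VMALLOC_OFFSET (8MB) boundary.  With high_memory == 0xe0000000,
 * for example, this yields 0xe0800000, leaving exactly the 8MB guard
 * hole described in the comment.  A hypothetical helper with the
 * same arithmetic:
 */
static inline unsigned long example_vmalloc_start(unsigned long high)
{
	/* add one full 8MB step, then mask down to an 8MB boundary */
	return (high + VMALLOC_OFFSET) & ~((unsigned long)VMALLOC_OFFSET - 1);
}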
/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;
extern pgprot_t		pgprot_hyp_device;
extern pgprot_t		pgprot_s2;
extern pgprot_t		pgprot_s2_device;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
#define PAGE_HYP_EXEC		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
#define PAGE_HYP_RO		_MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)	\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_device(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
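/*
 * Illustrative sketch (not part of the original header): the
 * __pgprot_modify() helpers above replace only the L_PTE_MT_MASK
 * memory-type field, leaving permission bits such as L_PTE_XN and
 * L_PTE_RDONLY intact.  A hypothetical helper deriving a
 * write-combining kernel protection from PAGE_KERNEL:
 */
static inline pgprot_t example_kernel_wc_prot(void)
{
	/* cached kernel mapping, memory type rewritten to "bufferable" */
	return pgprot_writecombine(PAGE_KERNEL);
}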
#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pmd_none(pmd)		(!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? \
					pte_present(pte) : pte_valid(pte))
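/*
 * Illustrative sketch (not part of the original header): how the walk
 * helpers above compose.  pud_offset()/pmd_offset() come from the
 * nopud and 2/3-level headers included earlier; the surrounding
 * kernel context (struct mm_struct from <linux/mm_types.h>) is
 * assumed.  Locking and huge-page cases are deliberately elided.
 */
static inline pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* index the pgd */
	pud_t *pud = pud_offset(pgd, addr);	/* folded level on !LPAE */
	pmd_t *pmd = pmd_offset(pud, addr);

	if (pmd_none(*pmd))			/* no page table here */
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* Linux-view PTE */
}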
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
	pteval_t needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;

	return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
		if (!pte_special(pteval))
			__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
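/*
 * Illustrative sketch (not part of the original header): because
 * L_PTE_RDONLY has inverted sense, write-protecting a page means
 * *setting* the bit, as pte_wrprotect() above does.  A hypothetical
 * copy-on-write style helper built purely from the accessors above:
 */
static inline pte_t example_make_cow(pte_t pte)
{
	pte = pte_wrprotect(pte);	/* sets L_PTE_RDONLY */
	pte = pte_mkclean(pte);		/* clears L_PTE_DIRTY */
	return pte;			/* caller would set_pte_at() this */
}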
/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */
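/*
 * Illustrative worked example (not part of the original header): with
 * __SWP_TYPE_SHIFT == 2 and __SWP_OFFSET_SHIFT == 7 above, encoding
 * type 3 at offset 0x1234 gives
 *
 *	(3 << 2) | (0x1234 << 7) == 0x91a0c
 *
 * Bits 1:0 stay zero, so L_PTE_PRESENT is clear (the entry faults as
 * not-present) while the non-zero value keeps it from reading as
 * pte_none().
 */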